diff --git a/cmd/crio/main.go b/cmd/crio/main.go
index 8a782beb75f..b307093d160 100644
--- a/cmd/crio/main.go
+++ b/cmd/crio/main.go
@@ -14,7 +14,7 @@ import (
 	"strings"
 	"time"
 
-	_ "github.com/containers/podman/v3/pkg/hooks/0.1.0"
+	_ "github.com/containers/podman/v4/pkg/hooks/0.1.0"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/cri-o/cri-o/internal/criocli"
 	"github.com/cri-o/cri-o/internal/log"
diff --git a/contrib/test/ci/cri-o.spec b/contrib/test/ci/cri-o.spec
index 34da67aa583..201589b09de 100644
--- a/contrib/test/ci/cri-o.spec
+++ b/contrib/test/ci/cri-o.spec
@@ -84,7 +84,7 @@ popd
 ln -s vendor src
 export GOPATH=$(pwd)/_output:$(pwd)
 
-export BUILDTAGS="selinux seccomp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_ostree_stub"
+export BUILDTAGS="selinux seccomp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_ostree_stub containers_image_openpgp"
 make bin/crio bin/crio-status bin/pinns
 
 # build docs
diff --git a/go.mod b/go.mod
index a63ba7294bf..c06f1696d4f 100644
--- a/go.mod
+++ b/go.mod
@@ -8,21 +8,21 @@ require (
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/container-orchestrated-devices/container-device-interface v0.4.0
 	github.com/containerd/cgroups v1.0.4
-	github.com/containerd/containerd v1.5.8
+	github.com/containerd/containerd v1.6.4
 	github.com/containerd/cri-containerd v1.19.0
 	github.com/containerd/fifo v1.0.0
 	github.com/containerd/ttrpc v1.1.0
 	github.com/containerd/typeurl v1.0.2
 	github.com/containernetworking/cni v1.1.1
 	github.com/containernetworking/plugins v1.1.1
-	github.com/containers/buildah v1.23.1
-	github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f
+	github.com/containers/buildah v1.26.1
+	github.com/containers/common v0.48.0
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c
-	github.com/containers/image/v5 v5.17.0
+	github.com/containers/conmon-rs v0.0.0-20220609131637-836ba11bf0aa
+	github.com/containers/image/v5 v5.21.1
 	github.com/containers/ocicrypt v1.1.5
-	github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d
-	github.com/containers/storage v1.37.0
+	github.com/containers/podman/v4 v4.1.0
+	github.com/containers/storage v1.41.0
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/cpuguy83/go-md2man v1.0.10
 	github.com/creack/pty v1.1.18
@@ -46,9 +46,9 @@ require (
 	github.com/onsi/gomega v1.19.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
-	github.com/opencontainers/runc v1.1.2
-	github.com/opencontainers/runtime-spec v1.0.3-0.20210709190330-896175883324
-	github.com/opencontainers/runtime-tools v0.9.1-0.20210326182921-59cdde06764b
+	github.com/opencontainers/runc v1.1.3
+	github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
+	github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f
 	github.com/opencontainers/selinux v1.10.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.12.2
@@ -58,14 +58,14 @@ require (
 	github.com/stretchr/testify v1.7.2
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 	github.com/urfave/cli/v2 v2.8.1
-	github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54
+	github.com/vishvananda/netlink v1.2.1-beta.2
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0
 	go.opentelemetry.io/otel v1.3.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0
 	go.opentelemetry.io/otel/sdk v1.3.0
-	golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
-	golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
-	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+	golang.org/x/net v0.0.0-20220607020251-c690dde0001d
+	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
+	golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68
 	google.golang.org/grpc v1.47.0
 	k8s.io/api v0.24.1
 	k8s.io/apimachinery v0.24.1
@@ -83,43 +83,45 @@ require (
 )
 
 require (
-	capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b // indirect
+	capnproto.org/go/capnp/v3 v3.0.0-alpha.3 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/Microsoft/hcsshim v0.9.2 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/acomagu/bufpipe v1.0.3 // indirect
+	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
 	github.com/aws/aws-sdk-go v1.43.30 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2 // indirect
+	github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0 // indirect
 	github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect
 	github.com/cheggaaa/pb/v3 v3.0.8 // indirect
 	github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
 	github.com/cilium/ebpf v0.7.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.10.1 // indirect
-	github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b // indirect
-	github.com/containers/psgo v1.7.1 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
+	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
+	github.com/containers/psgo v1.7.2 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
-	github.com/docker/docker v20.10.12+incompatible // indirect
+	github.com/docker/docker v20.10.14+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.6.4 // indirect
-	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc // indirect
+	github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 // indirect
 	github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 // indirect
 	github.com/emirpasic/gods v1.12.0 // indirect
 	github.com/fatih/color v1.13.0 // indirect
-	github.com/fsouza/go-dockerclient v1.7.4 // indirect
+	github.com/fsouza/go-dockerclient v1.7.11 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-git/gcfg v1.5.0 // indirect
 	github.com/go-git/go-billy/v5 v5.3.1 // indirect
@@ -142,6 +144,7 @@ require (
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
@@ -149,16 +152,15 @@ require (
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
-	github.com/hpcloud/tail v1.0.0 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
-	github.com/jinzhu/copier v0.3.2 // indirect
+	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/kevinburke/ssh_config v1.1.0 // indirect
-	github.com/klauspost/compress v1.14.2 // indirect
+	github.com/klauspost/compress v1.15.4 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/lithammer/dedent v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -175,31 +177,30 @@ require (
 	github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/moby/sys/mount v0.2.0 // indirect
-	github.com/moby/sys/mountinfo v0.6.0 // indirect
+	github.com/moby/sys/mountinfo v0.6.1 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/mtrmac/gpgme v0.1.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
+	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/onsi/ginkgo v1.16.5 // indirect
-	github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 // indirect
-	github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 // indirect
+	github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 // indirect
+	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+	github.com/proglottis/gpgme v0.1.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/rootless-containers/rootlesskit v0.14.5 // indirect
 	github.com/russross/blackfriday v1.6.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/saschagrunert/go-modiff v1.3.0 // indirect
-	github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 // indirect
+	github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/shirou/gopsutil/v3 v3.22.1 // indirect
 	github.com/spf13/cobra v1.4.0 // indirect
@@ -207,10 +208,11 @@ require (
 	github.com/spiegel-im-spiegel/errs v1.0.2 // indirect
 	github.com/spiegel-im-spiegel/go-cvss v0.4.0 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
+	github.com/sylabs/sif/v2 v2.7.0 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.1.5 // indirect
+	github.com/vbauerster/mpb/v7 v7.4.1 // indirect
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
 	github.com/xanzy/go-gitlab v0.68.0 // indirect
 	github.com/xanzy/ssh-agent v0.3.0 // indirect
@@ -237,7 +239,6 @@ require (
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
 	google.golang.org/protobuf v1.28.0 // indirect
-	gopkg.in/fsnotify.v1 v1.4.7 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index af323ecd782..72a67883746 100644
--- a/go.sum
+++ b/go.sum
@@ -1,9 +1,10 @@
 4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
 bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M=
-capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b h1:Dyvj8g9LBROQ6vC1GyfOPrH7GHvZAjMCIGwclTTsFaE=
-capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b/go.mod h1:Dynqh9/LE2Wy7jYd2wIoYgqFwh3hZHCmG7j6/uCWX6c=
+capnproto.org/go/capnp/v3 v3.0.0-alpha.3 h1:F2d51g9WVp5TzoWIUttjrkNa8u5WQmxCIk/W6YXUoDY=
+capnproto.org/go/capnp/v3 v3.0.0-alpha.3/go.mod h1:Dynqh9/LE2Wy7jYd2wIoYgqFwh3hZHCmG7j6/uCWX6c=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -64,6 +65,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
 github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
 github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -101,6 +103,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
 github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -124,6 +127,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
@@ -133,7 +137,6 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
@@ -146,8 +149,9 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 h1:cSHEbLj0GZeHM1mWG84qEnGFojNEQ83W7cwaPRjcwXU=
+github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
@@ -204,7 +208,6 @@ github.com/aws/aws-sdk-go v1.37.6/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zK
 github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.43.30 h1:Q3lgrX/tz/MkEiPVVQnOQThBAK2QC2SCTCKTD1mwGFA=
 github.com/aws/aws-sdk-go v1.43.30/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -219,6 +222,7 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
 github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -230,16 +234,14 @@ github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2
 github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
 github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U=
+github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE=
 github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
-github.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY=
 github.com/carolynvs/magex v0.5.0/go.mod h1:i4bMWEmwGw2+W/u/0cEs5FTEyLtaN5DEKvVLV+KOEsc=
-github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
 github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
@@ -254,12 +256,12 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
 github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
-github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2 h1:z7G4H5f1Z/n3di9qnGtKDm6jmP434HD7dIEh3YyLn9I=
-github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2/go.mod h1:yvaQuauIKzvfX/PIqINxWxoOYd35Dk/U2MS8onfkRHU=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0 h1:txB5jvhzUCSiiQmqmMWpo5CEB7Gj/Hq5Xqi7eaPl8ko=
+github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0/go.mod h1:67kWC1PXQLR3lM/mmNnu3Kzn7K4TSWZAGUuQP1JSngk=
 github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/checkpoint-restore/go-criu/v5 v5.1.0/go.mod h1:iaS8bb7p6zKJanp1Qe8mpl7+bnkYBR500psJR6mwma0=
+github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
@@ -294,7 +296,6 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/container-orchestrated-devices/container-device-interface v0.0.0-20210325223243-f99e8b6c10b9/go.mod h1:eQt66kIaJpUhCrjCtBFQGQxGLbAUl0OuuwjTH16ON4s=
 github.com/container-orchestrated-devices/container-device-interface v0.4.0 h1:b/mROkfDr1W8fJ25T66iVheHFnWixgyxTOSbO8i7jp4=
 github.com/container-orchestrated-devices/container-device-interface v0.4.0/go.mod h1:E1zcucIkq9P3eyNmY+68dBQsTcsXJh9cgRo2IVNScKQ=
 github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
@@ -312,6 +313,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
 github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@@ -337,18 +339,22 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw=
 github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
+github.com/containerd/containerd v1.6.4 h1:SEDZBp10mhCp+hkO3Njz/YhGrI7ah3edNcUlRdUPOgg=
+github.com/containerd/containerd v1.6.4/go.mod h1:oWOqbuJUZmOVafhA0lj2NAXbiO1u7F0K5l1bUgdyo94=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
 github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
 github.com/containerd/cri-containerd v1.19.0 h1:PcTvvl+SHaekCMQZFQkYjn1RKlYrK6khYbuhOeF68k0=
 github.com/containerd/cri-containerd v1.19.0/go.mod h1:wxbGdReWGCalzGOEpifoHeYCK4xAgnj4o/4bVB+9voU=
 github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -360,6 +366,10 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
 github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
 github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.5/go.mod h1:Rf2ZrMycr1El589IyuRzn7RkfdRZVKaFGaxSDHVAjj0=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -370,15 +380,17 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak
 github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
 github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
 github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
 github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
 github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.10.1 h1:hd1EoVjI2Ax8Cr64tdYqnJ4i4pZU49FkEf5kU8KxQng=
 github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk=
+github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -400,6 +412,7 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.0/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k=
 github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
@@ -407,38 +420,41 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
 github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
 github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
 github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.23.1 h1:Tpc9DsRuU+0Oofewpxb6OJVNQjCu7yloN/obUqzfDTY=
-github.com/containers/buildah v1.23.1/go.mod h1:4WnrN0yrA7ab0ppgunixu2WM1rlD2rG8QLJAKbEkZlQ=
-github.com/containers/common v0.44.2/go.mod h1:7sdP4vmI5Bm6FPFxb3lvAh1Iktb6tiO1MzjUzhxdoGo=
-github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f h1:vVmx51AzWvB4/ao2zyR6s053a1leLTOh+zsOPVWQRgA=
-github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f/go.mod h1:aml/OO4FmYfPbfT87rvWiCgkLzTdqO6PuZ/xXq6bPbk=
+github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E=
+github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY=
+github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
+github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c h1:x6gYpFEXrLN+4+NgGg8QkmkGrtujHKgdUgoGY9oUtjU=
-github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c/go.mod h1:YogXfQJ+PfB6Dsc/tZDoTazDBdWiAUYnyElblkEtALo=
+github.com/containers/conmon-rs v0.0.0-20220609131637-836ba11bf0aa h1:JK8bFwd4ox8dnPLw3xaInas/axoLLIPOWPiYnEEPG3k=
+github.com/containers/conmon-rs v0.0.0-20220609131637-836ba11bf0aa/go.mod h1:XgdukEWM2ieOsm7jA83jfrxbk5fvBaIkWf+vdaOKQcs=
 github.com/containers/image/v5 v5.10.4/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII=
-github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4=
-github.com/containers/image/v5 v5.17.0 h1:KS5pro80CCsSp5qDBTMmSAWQo+xcBX19zUPExmYX2OQ=
-github.com/containers/image/v5 v5.17.0/go.mod h1:GnYVusVRFPMMTAAUkrcS8NNSpBp8oyrjOUe04AAmRr4=
-github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
+github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=
+github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
+github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
 github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.4/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
 github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
 github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
-github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d h1:cztBXICnZeM0Cz/7IzbLbMs8jYCnJmil94NJszFYNXA=
-github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d/go.mod h1:RvfQgiFLco33brSGNga2tLZifqKiUu8n6OxnjDTPUb4=
-github.com/containers/psgo v1.7.1 h1:2N6KADeFvBm1aI2iXxu6+/Xh7CCkdh8p8F3F/cpIU5I=
-github.com/containers/psgo v1.7.1/go.mod h1:mWGpFzW73qWFA+blhF6l7GuKzbrACkYgr/ajiNQR+RM=
-github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM=
+github.com/containers/podman/v4 v4.1.0 h1:3fo/4U2fjdeKB86RpyJDqyglg5G6TkDuAU9jECv8Z9A=
+github.com/containers/podman/v4 v4.1.0/go.mod h1:ZgZCaL1EAnRXPbCUVQ3P24UZ+uGAGUTXLysvEBwpmkY=
+github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
+github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
 github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
-github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20=
-github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8=
-github.com/containers/storage v1.37.0 h1:HVhDsur6sx889ZIZ1d1kEiOzv3gsr5q0diX2VZmOdSg=
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
+github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
+github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
+github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
+github.com/containers/storage v1.41.0 h1:IsoAJ1q3s/jfHB7eoiRhvTRTcuV+ywY3CkbbgKtT5Bo=
+github.com/containers/storage v1.41.0/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
 github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
 github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -472,7 +488,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.15/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cri-o/ocicni v0.4.0 h1:FKTiq4CESW5OfqM3M87KyHApcawtWw5EwufMiZyXYb0=
@@ -497,7 +512,7 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/digitalocean/go-libvirt v0.0.0-20201209184759-e2a69bcd5bd1/go.mod h1:QS1XzqZLcDniNYrN7EZefq3wIyb/M2WmJbql4ZKoc1Q=
-github.com/digitalocean/go-qemu v0.0.0-20210209191958-152a1535e49f/go.mod h1:IetBE52JfFxK46p2n2Rqm+p5Gx1gpu2hRHsrbnPOWZQ=
+github.com/digitalocean/go-qemu v0.0.0-20210326154740-ac9e0b687001/go.mod h1:IetBE52JfFxK46p2n2Rqm+p5Gx1gpu2hRHsrbnPOWZQ=
 github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
@@ -510,23 +525,24 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U=
+github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.14+incompatible h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w=
+github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
 github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
+github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc h1:/A+mPcpajLsWiX9gSnzdVKM/IzZoYiNqXHe83z50k2c=
-github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
+github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 h1:YcvzLmdrP/b8kLAGJ8GT7bdncgCAiWxJZIlt84D+RJg=
+github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4=
@@ -578,15 +594,15 @@ github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.13.1/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
 github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fsouza/go-dockerclient v1.7.4 h1:daYb0km2a91aNt2KTc4AEcTwgExYtQXHhkt5mjdRD1o=
-github.com/fsouza/go-dockerclient v1.7.4/go.mod h1:het+LPt7NaTEVGgwXJAKxPn77RZrQKb2EXJb4e+BHv0=
+github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y=
+github.com/fsouza/go-dockerclient v1.7.11 h1:pRmGMANAl+tmr+IYNYq8IWWcSbiKQMSRumYLv8H5sfk=
+github.com/fsouza/go-dockerclient v1.7.11/go.mod h1:zvYxutUNOK853i1s7VywZxQgxSHbm7A6en/q9MHBN6k=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
 github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
 github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
@@ -596,7 +612,6 @@ github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSy
 github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.1.2-0.20181113160402-cbabf5414432/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
 github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g=
@@ -683,7 +698,6 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.5/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -698,8 +712,6 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/gojuno/minimock/v3 v3.0.4/go.mod h1:HqeqnwV8mAABn3pO5hqF+RE7gjA0jsN8cbbSogoGrzI=
-github.com/gojuno/minimock/v3 v3.0.8/go.mod h1:TPKxc8tiB8O83YH2//pOzxvEjaI3TMhd6ev/GmlMiYA=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -799,15 +811,10 @@ github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCj
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI=
-github.com/google/go-tpm v0.2.1-0.20200615092505-5d8a91de9ae3/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw=
-github.com/google/go-tpm-tools v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0=
-github.com/google/goexpect v0.0.0-20191001010744-5b6988669ffa/go.mod h1:qtE5aAEkt0vOSA84DBh8aJsz6riL8ONfqfULY7lBjqc=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
 github.com/google/licenseclassifier/v2 v2.0.0-alpha.1/go.mod h1:YAgBGGTeNDMU+WfIgaFvjZe4rudym4f6nIn8ZH5X+VM=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -835,7 +842,7 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU=
 github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk=
-github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
 github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -859,6 +866,7 @@ github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:
 github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
@@ -941,9 +949,6 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
 github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
 github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o=
 github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4=
-github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs=
-github.com/hexdigest/gowrap v1.1.8/go.mod h1:H/JiFmQMp//tedlV8qt2xBdGzmne6bpbaSuiHmygnMw=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
 github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
@@ -960,9 +965,7 @@ github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/insomniacslk/dhcp v0.0.0-20210120172423-cc9239ac6294/go.mod h1:TKl4jN3Voofo4UJIicyNhWGp/nlQqQkFxmwIFTvBkKI=
-github.com/insomniacslk/dhcp v0.0.0-20211209223715-7d93572ebe8e/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E=
-github.com/intel-go/cpuid v0.0.0-20200819041909-2aa72927c3e2/go.mod h1:RmeVYf9XrPRbRc3XIx0gLYA8qOFvNoPOfaEZduRlEp4=
+github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E=
 github.com/intel/goresctrl v0.2.0 h1:JyZjdMQu9Kl/wLXe9xA6s1X+tF6BWsQPFGJMEeCfWzE=
 github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
 github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
@@ -978,8 +981,8 @@ github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7H
 github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
 github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s=
 github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
-github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w=
-github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
+github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
+github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
 github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
 github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -1020,7 +1023,6 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
 github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/kaey/framebuffer v0.0.0-20140402104929-7b385489a1ff/go.mod h1:tS4qtlcKqtt3tCIHUflVSqeP3CLH5Qtv2szX9X2SyhU=
 github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
 github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
@@ -1033,8 +1035,6 @@ github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.6/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
@@ -1042,11 +1042,13 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
-github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
+github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1058,7 +1060,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -1110,7 +1111,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
@@ -1139,7 +1139,6 @@ github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0=
 github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
@@ -1190,6 +1189,7 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx
 github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
 github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
+github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
 github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
@@ -1200,9 +1200,12 @@ github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+S
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo=
 github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.1 h1:+H/KnGEAGRpTrEAqNVQ2AM3SiwMgJUt/TXj+Z8cmCIc=
+github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= @@ -1225,7 +1228,6 @@ github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1: github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -1241,6 +1243,7 @@ github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4N github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= @@ -1300,8 +1303,10 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20200206005212-79b036d80240/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1310,49 +1315,48 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw= -github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20201121164853-7413a7f753e1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210709190330-896175883324 h1:EPCbQw9GYqjcp8Y5Oz/RzEO3hlxA1MiQJ3DiYgzzUqg= -github.com/opencontainers/runtime-spec v1.0.3-0.20210709190330-896175883324/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab h1:YQZXa3elcHgKXAa2GjVFC9M3JeP7ZPyFD1YByDx/dgQ= +github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.0.0-20190417131837-cd1349b7c47e/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/runtime-tools v0.9.1-0.20210326182921-59cdde06764b h1:BDT83NWjBPS3Esj/BS1Cf9F4NYzYmiitJZ0TlXx5M4w= -github.com/opencontainers/runtime-tools v0.9.1-0.20210326182921-59cdde06764b/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f h1:MMcsVl0FAVEahmXTy+uXoDTw3yJq7nGrK8ITs/kkreo= 
+github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4= github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4= -github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805 h1:bfLqBGqF04GAoqbMkSrd5VPk/t66OnP0bTTisMaF4ro= +github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/orangecms/go-framebuffer v0.0.0-20200613202404-a0700d90c330/go.mod h1:3Myb/UszJY32F2G7yGkUtcW/ejHpjlGfYLim7cv2uKA= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/getopt/v2 v2.1.0/go.mod h1:4NtW75ny4eBw9fO1bhtNdYTlZKYX5/tBLtsOpwKIKd0= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -1363,7 +1367,6 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/pierrec/lz4/v4 v4.1.11/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1372,7 +1375,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1384,6 +1386,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= +github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -1391,6 +1395,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -1437,8 +1442,6 @@ github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mo github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/rck/unit v0.0.3/go.mod 
h1:jTOnzP4s1OjIP1vdxb4n76b23QPKS4EurYg7sYMr2DM= -github.com/rekby/gpt v0.0.0-20200219180433-a930afbc6edc/go.mod h1:scrOqOnnHVKCHENvFw8k9ajCb88uqLQDA4BvuJNJ2ew= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= @@ -1447,12 +1450,9 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1-0.20210923151022-86f73c517451/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rootless-containers/rootlesskit v0.14.5 h1:X4eNt2e1h/uSjlssKqpeTY5fatrjDz9F9FX05RJB7Tw= -github.com/rootless-containers/rootlesskit v0.14.5/go.mod h1:Ai3detLzryb/4EkzXmNfh8aByUcBXp/qqkQusJs1SO8= +github.com/rootless-containers/rootlesskit v1.0.1/go.mod h1:t2UAiYagxrJ+wmpFAUIZPcqsm4k2B7ve6g7lILKbloc= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1466,7 +1466,6 @@ github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoL github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= @@ -1478,10 +1477,12 @@ github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= 
-github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= github.com/sendgrid/rest v2.6.2+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= @@ -1576,9 +1577,12 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk= +github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1605,16 +1609,11 @@ github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2Ogf github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/twitchtv/twirp v5.8.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= -github.com/u-root/gobusybox/src v0.0.0-20220120221406-4e2fbb8f41bf/go.mod h1:n6S68wL67GT3KLZWmLzrJCr1rMLQ6xzA9rv2k46kF2Q= -github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a/go.mod h1:RWIgJWqm9/0gjBZ0Hl8iR6MVGzZ+yAda2uqqLmetE2I= -github.com/u-root/u-root v0.8.1-0.20220307150114-d511ded1d944/go.mod h1:fZLkJyQavL51QSfSFTbkxtTy0UJYQcLs5O1z3aXosfA= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/u-root/uio v0.0.0-20210528114334-82958018845c/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA= -github.com/u-root/uio v0.0.0-20210528151154-e40b768296a7/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA= -github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1622,12 +1621,14 @@ github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lP github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs= github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4= github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= @@ -1644,26 +1645,23 @@ github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpc github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= -github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM= -github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c= -github.com/vbauerster/mpb/v7 v7.1.4/go.mod h1:4zulrZfvshMOnd2APiHgWS9Yrw08AzZVRr9G11tkpcQ= -github.com/vbauerster/mpb/v7 v7.1.5 h1:vtUEUfQHmNeJETyF4AcRCOV6RC4wqFwNORy52UMXPbQ= -github.com/vbauerster/mpb/v7 v7.1.5/go.mod h1:4M8+qAoQqV60WDNktBM5k05i1iTrXE7rjKOHEVkVlec= +github.com/vbauerster/mpb/v7 v7.4.1 h1:NhLMWQ3gNg2KJR8oeA9lO8Xvq+eNPmixDmB6JEQOUdA= +github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod 
h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54 h1:8mhqcHPqTMhSPoslhGYihEgSfc77+7La1P6kiB6+9So= -github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20220115184804-dd687eb2f2d4/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/vtolstov/go-ioctl v0.0.0-20151206205506-6be9cced4810/go.mod h1:dF0BBJ2YrV1+2eAIyEI+KeSidgA6HqoIP1u5XTlMq/o= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/go-gitlab v0.43.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= @@ -1747,6 +1745,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdm go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= @@ -1804,9 +1803,9 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1857,7 +1856,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1929,12 +1927,14 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1969,8 +1969,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2023,7 +2023,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200121082415-34d275377bf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2051,14 +2050,12 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2075,6 +2072,7 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2093,34 +2091,30 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210925032602-92d5a993a665/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220608164250-635b8c9b7f68 h1:z8Hj/bl9cOV2grsOpEaQFUaly0JWN3i97mo3jXKJNp0= +golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210916214954-140adaaadfaf/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2139,6 +2133,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2165,7 +2160,6 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2356,7 +2350,6 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= @@ -2389,7 +2382,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= 
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= @@ -2453,6 +2445,7 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= @@ -2500,6 +2493,7 @@ k8s.io/system-validators v1.7.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0V k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2513,12 +2507,10 @@ mvdan.cc/editorconfig v0.2.0/go.mod h1:lvnnD3BNdBYkhq+B4uBuFFKatfp02eB6HixDvEz91 mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/sh/v3 v3.4.1/go.mod h1:p/tqPPI4Epfk2rICAe2RoaNd8HBSJ8t9Y2DA9yQlbzY= mvdan.cc/sh/v3 v3.5.1 h1:hmP3UOw4f+EYexsJjFxvU38+kn+V/s2CclXHanIBkmQ= mvdan.cc/sh/v3 v3.5.1/go.mod h1:1JcoyAKm1lZw/2bZje/iYKWicU/KMd0rsyJeKHnsK4E= mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -pack.ag/tftp v1.0.1-0.20181129014014-07909dfbde3c/go.mod h1:N1Pyo5YG+K90XHoR2vfLPhpRuE8ziqbgMn/r/SghZas= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -2539,6 +2531,7 @@ sigs.k8s.io/release-utils v0.2.0/go.mod h1:9O5livl2h3Q56jUkoZ7UnV22XVRB6MuD4l/51 sigs.k8s.io/release-utils v0.7.0 h1:W8+0glcVHbBUpeQ9V31zKyzSh5V/06XRlWBHlyMcpkw= sigs.k8s.io/release-utils v0.7.0/go.mod h1:SK+/kkc2i7ZO0CFXDCvXpzIZyt13cPlscaApaZD7VmU= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod 
h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
@@ -2548,4 +2541,3 @@ sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
 sigs.k8s.io/zeitgeist v0.3.0 h1:vCdINPGoPEa9/jRUFBuIorptKZe/KoTqf8hKdyeLX6M=
 sigs.k8s.io/zeitgeist v0.3.0/go.mod h1:JM9oeWgqf0Vhrkfaj1sHrKPYECxIjrorv7FdO2RvWok=
 sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
-src.elv.sh v0.16.0-rc1.0.20220116211855-fda62502ad7f/go.mod h1:kPbhv5+fBeUh85nET3wWhHGUaUQ64nZMJ8FwA5v5Olg=
diff --git a/internal/config/cgmgr/cgmgr.go b/internal/config/cgmgr/cgmgr.go
index 137171dd17f..99bf244f280 100644
--- a/internal/config/cgmgr/cgmgr.go
+++ b/internal/config/cgmgr/cgmgr.go
@@ -10,7 +10,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/containers/podman/v3/pkg/cgroups"
+	"github.com/containers/common/pkg/cgroups"
 	"github.com/cri-o/cri-o/internal/config/node"
 	libctr "github.com/opencontainers/runc/libcontainer/cgroups"
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
diff --git a/internal/config/cgmgr/cgroupfs.go b/internal/config/cgmgr/cgroupfs.go
index f837f5015fd..39b89fa481d 100644
--- a/internal/config/cgmgr/cgroupfs.go
+++ b/internal/config/cgmgr/cgroupfs.go
@@ -9,7 +9,7 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/containers/podman/v3/pkg/cgroups"
+	"github.com/containers/common/pkg/cgroups"
 	"github.com/cri-o/cri-o/internal/config/node"
 	"github.com/cri-o/cri-o/utils"
 	libctr "github.com/opencontainers/runc/libcontainer/cgroups"
diff --git a/internal/config/cgmgr/stats.go b/internal/config/cgmgr/stats.go
index fd160034078..327f0be3e17 100644
--- a/internal/config/cgmgr/stats.go
+++ b/internal/config/cgmgr/stats.go
@@ -9,7 +9,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/containers/podman/v3/pkg/cgroups"
+	"github.com/containers/common/pkg/cgroups"
 	"github.com/cri-o/cri-o/internal/config/node"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
diff --git a/internal/config/cgmgr/systemd.go b/internal/config/cgmgr/systemd.go
index 0673e5bfd9b..2c5688d3bc4 100644
--- a/internal/config/cgmgr/systemd.go
+++ b/internal/config/cgmgr/systemd.go
@@ -9,7 +9,7 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/containers/podman/v3/pkg/rootless"
+	"github.com/containers/podman/v4/pkg/rootless"
 	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
 	"github.com/cri-o/cri-o/internal/config/node"
 	"github.com/cri-o/cri-o/internal/dbusmgr"
diff --git a/internal/config/device/device.go b/internal/config/device/device.go
index 9807df7df1b..f91d38973e6 100644
--- a/internal/config/device/device.go
+++ b/internal/config/device/device.go
@@ -3,7 +3,7 @@ package device
 import (
 	"strings"
 
-	createconfig "github.com/containers/podman/v3/pkg/specgen/generate"
+	createconfig "github.com/containers/podman/v4/pkg/specgen/generate"
 	"github.com/opencontainers/runc/libcontainer/devices"
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
diff --git a/internal/config/node/cgroups.go b/internal/config/node/cgroups.go
index e3bfda5c25c..5e59415a772 100644
--- a/internal/config/node/cgroups.go
+++ b/internal/config/node/cgroups.go
@@ -8,7 +8,7 @@ import (
 	"path/filepath"
 	"sync"
 
-	podmancgroups "github.com/containers/podman/v3/pkg/cgroups"
+	"github.com/containers/common/pkg/cgroups"
 	libctrcgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/pkg/errors"
 )
@@ -28,7 +28,7 @@ var (
 
 func CgroupIsV2() bool {
 	var cgroupIsV2 bool
-	cgroupIsV2, cgroupIsV2Err = podmancgroups.IsCgroup2UnifiedMode()
+	cgroupIsV2, cgroupIsV2Err = cgroups.IsCgroup2UnifiedMode()
 	return cgroupIsV2
 }
diff --git a/internal/config/seccomp/seccomp.go b/internal/config/seccomp/seccomp.go
index 688178f915a..d526e55cc4b 100644
--- a/internal/config/seccomp/seccomp.go
+++ b/internal/config/seccomp/seccomp.go
@@ -30,7 +30,7 @@ func DefaultProfile() *seccomp.Seccomp {
 	const (
 		unshareName              = "unshare"
 		unshareParentStructIndex = 1
-		unshareIndex             = 347
+		unshareIndex             = 360
 	)
 	prof := seccomp.DefaultProfile()
 	// We know the default profile at compile time
diff --git a/internal/factory/container/container.go b/internal/factory/container/container.go
index fa416c8fb29..14fafab0a3a 100644
--- a/internal/factory/container/container.go
+++ b/internal/factory/container/container.go
@@ -10,7 +10,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/containers/podman/v3/pkg/annotations"
+	"github.com/containers/podman/v4/pkg/annotations"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/cri-o/cri-o/internal/config/capabilities"
 	"github.com/cri-o/cri-o/internal/config/device"
diff --git a/internal/factory/container/container_test.go b/internal/factory/container/container_test.go
index 54dd0854231..b2857c170cd 100644
--- a/internal/factory/container/container_test.go
+++ b/internal/factory/container/container_test.go
@@ -7,7 +7,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/containers/podman/v3/pkg/annotations"
+	"github.com/containers/podman/v4/pkg/annotations"
 	"github.com/cri-o/cri-o/internal/config/capabilities"
 	"github.com/cri-o/cri-o/internal/hostport"
 	"github.com/cri-o/cri-o/internal/lib"
diff --git a/internal/lib/container_server.go b/internal/lib/container_server.go
index b819426f8ef..204e3fa1174 100644
--- a/internal/lib/container_server.go
+++ b/internal/lib/container_server.go
@@ -7,8 +7,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containers/podman/v3/pkg/annotations"
-	"github.com/containers/podman/v3/pkg/hooks"
+	"github.com/containers/podman/v4/pkg/annotations"
+	"github.com/containers/podman/v4/pkg/hooks"
 	cstorage "github.com/containers/storage"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/truncindex"
diff --git a/internal/lib/container_server_test.go b/internal/lib/container_server_test.go
index d9c5ac01595..3b526e29e7a 100644
--- a/internal/lib/container_server_test.go
+++ b/internal/lib/container_server_test.go
@@ -7,7 +7,7 @@ import (
 	"os"
 	"time"
 
-	"github.com/containers/podman/v3/pkg/annotations"
+	"github.com/containers/podman/v4/pkg/annotations"
 	"github.com/cri-o/cri-o/internal/lib"
 	"github.com/cri-o/cri-o/internal/oci"
 	libconfig "github.com/cri-o/cri-o/pkg/config"
diff --git a/internal/oci/container.go b/internal/oci/container.go
index dcd9bd7af2e..7305ec1aa99 100644
--- a/internal/oci/container.go
+++ b/internal/oci/container.go
@@ -11,8 +11,8 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/containers/common/pkg/cgroups"
 	"github.com/containers/common/pkg/signal"
-	"github.com/containers/podman/v3/pkg/cgroups"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/cri-o/cri-o/internal/config/nsmgr"
 	ann "github.com/cri-o/cri-o/pkg/annotations"
diff --git a/internal/oci/oci.go b/internal/oci/oci.go
index b377c9a9eb1..e7f60d30e6d 100644
--- a/internal/oci/oci.go
+++ b/internal/oci/oci.go
@@ -6,7 +6,6 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"strings"
 	"sync"
 	"syscall"
 	"time"
@@ -385,41 +384,3 @@ type ExecSyncError struct {
 func (e *ExecSyncError) Error() string {
 	return fmt.Sprintf("command error: %+v, stdout: %s, stderr: %s, exit code %d", e.Err, e.Stdout.Bytes(), e.Stderr.Bytes(), e.ExitCode)
 }
-
-// BuildContainerdBinaryName() is responsible for ensuring the binary passed will
-// be properly converted to the containerd binary naming pattern.
-//
-// This method should never ever be called from anywhere else but the runtimeVM,
-// and the only reason its exported here is in order to get some test coverage.
-func BuildContainerdBinaryName(path string) string {
-	// containerd expects the runtime name to be in the following pattern:
-	// ($dir.)?$prefix.$name.$version
-	// --------  ------ ----- ---------
-	//     |        |     |       |
-	//     v        |     |       |
-	// "/usr/local/bin"   |       |
-	//  (optional)  |     |       |
-	//              v     |       |
-	//     "containerd.shim."     |
-	//                    |       |
-	//                    v       |
-	//               "kata-qemu"  |
-	//                            v
-	//                           "v2"
-	const expectedPrefix = "containerd-shim-"
-	const expectedVersion = "-v2"
-
-	const binaryPrefix = "containerd.shim"
-	const binaryVersion = "v2"
-
-	runtimeDir := filepath.Dir(path)
-	// This is only safe to do because the runtime_path, for the VM runtime_type, is validated in the config,
-	// allowing us to take the liberty to simply go ahead and check, without having to ensure we're receiving
-	// the binary in the expected form.
-	//
-	// For clarity, it could be ensured twice, but we count on the developer to never ever call this function
-	// in a different context from the one used in the runtime_vm.go file.
-	runtimeName := strings.SplitAfter(strings.Split(filepath.Base(path), expectedVersion)[0], expectedPrefix)[1]
-
-	return filepath.Join(runtimeDir, fmt.Sprintf("%s.%s.%s", binaryPrefix, runtimeName, binaryVersion))
-}
diff --git a/internal/oci/oci_test.go b/internal/oci/oci_test.go
index 624222c56da..4c65b68f092 100644
--- a/internal/oci/oci_test.go
+++ b/internal/oci/oci_test.go
@@ -172,26 +172,4 @@ var _ = t.Describe("Oci", func() {
 			Expect(privileged).To(Equal(false))
 		})
 	})
-
-	t.Describe("BuildContainerdBinaryName", func() {
-		It("Simple binary name (containerd-shim-kata-v2)", func() {
-			binaryName := oci.BuildContainerdBinaryName("containerd-shim-kata-v2")
-			Expect(binaryName).To(Equal("containerd.shim.kata.v2"))
-		})
-
-		It("Full binary path with a simple binary name (/usr/bin/containerd-shim-kata-v2)", func() {
-			binaryName := oci.BuildContainerdBinaryName("/usr/bin/containerd-shim-kata-v2")
-			Expect(binaryName).To(Equal("/usr/bin/containerd.shim.kata.v2"))
-		})
-
-		It("Composed binary name (containerd-shim-kata-qemu-with-dax-support-v2)", func() {
-			binaryName := oci.BuildContainerdBinaryName("containerd-shim-kata-qemu-with-dax-support-v2")
-			Expect(binaryName).To(Equal("containerd.shim.kata-qemu-with-dax-support.v2"))
-		})
-
-		It("Full binary path with a composed binary name (/usr/bin/containerd-shim-kata-v2)", func() {
-			binaryName := oci.BuildContainerdBinaryName("/usr/bin/containerd-shim-kata-qemu-with-dax-support-v2")
-			Expect(binaryName).To(Equal("/usr/bin/containerd.shim.kata-qemu-with-dax-support.v2"))
-		})
-	})
 })
diff --git a/internal/oci/runtime_pod.go b/internal/oci/runtime_pod.go
index 5ba26148435..49a6eb77ea7 100644
--- a/internal/oci/runtime_pod.go
+++ b/internal/oci/runtime_pod.go
@@ -11,7 +11,7 @@ import (
 	conmonClient "github.com/containers/conmon-rs/pkg/client"
 	conmonconfig "github.com/containers/conmon/runner/config"
-	"github.com/containers/podman/v3/libpod/define"
+	"github.com/containers/podman/v4/libpod/define"
 	"github.com/cri-o/cri-o/pkg/config"
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
diff --git a/internal/oci/runtime_vm.go b/internal/oci/runtime_vm.go
index 8b7f882ffbf..d50ebe6caf3 100644
--- a/internal/oci/runtime_vm.go
+++ b/internal/oci/runtime_vm.go
@@ -191,20 +191,16 @@ func (r *runtimeVM) startRuntimeDaemon(ctx context.Context, c *Container) error
 	}
 	args = append(args, "start")
 
-	// Modify the runtime path so that it complies with v2 shim API
-	newRuntimePath := BuildContainerdBinaryName(r.path)
-
 	r.ctx = namespaces.WithNamespace(r.ctx, namespaces.Default)
 
 	// Prepare the command to exec
 	cmd, err := client.Command(
 		r.ctx,
-		newRuntimePath,
-		"",
-		"",
-		c.BundlePath(),
-		nil,
-		args...,
+		&client.CommandConfig{
+			Runtime: r.path,
+			Path:    c.BundlePath(),
+			Args:    args,
+		},
 	)
 	if err != nil {
 		return err
diff --git a/internal/storage/image.go b/internal/storage/image.go
index b68c471f10b..c26254ee63b 100644
--- a/internal/storage/image.go
+++ b/internal/storage/image.go
@@ -21,7 +21,7 @@ import (
 	"github.com/containers/image/v5/transports/alltransports"
 	"github.com/containers/image/v5/types"
 	encconfig "github.com/containers/ocicrypt/config"
-	"github.com/containers/podman/v3/pkg/rootless"
+	"github.com/containers/podman/v4/pkg/rootless"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/reexec"
 	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
diff --git a/internal/storage/image_test.go b/internal/storage/image_test.go
index 3ad4d59fd14..bab3b20099c 100644
--- a/internal/storage/image_test.go
+++ b/internal/storage/image_test.go
@@ -6,7 +6,6 @@ import (
 	"os"
 
 	"github.com/containers/image/v5/types"
-	"github.com/containers/podman/v3/pkg/rootless"
 	cs "github.com/containers/storage"
 	"github.com/cri-o/cri-o/internal/storage"
 	containerstoragemock "github.com/cri-o/cri-o/test/mocks/containerstorage"
@@ -105,19 +104,6 @@ var _ = t.Describe("Image", func() {
 		Expect(err).To(BeNil())
 		Expect(imageService).NotTo(BeNil())
 	})
-
-	It("should fail to retrieve an image service without storage", func() {
-		// Given
-		storeOptions, err := cs.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID())
-		Expect(err).To(BeNil())
-		storeOptions.GraphRoot = ""
-
-		// When
-		_, err = cs.GetStore(storeOptions)
-
-		// Then
-		Expect(err).NotTo(BeNil())
-	})
 })
 
 t.Describe("GetStore", func() {
diff --git a/pkg/config/config.go b/pkg/config/config.go
index a94507fcaa9..4396b983501 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -15,8 +15,8 @@ import (
 	conmonconfig "github.com/containers/conmon/runner/config"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
-	"github.com/containers/podman/v3/pkg/hooks"
-	"github.com/containers/podman/v3/pkg/rootless"
+	"github.com/containers/podman/v4/pkg/hooks"
+	"github.com/containers/podman/v4/pkg/rootless"
 	"github.com/containers/storage"
 	"github.com/cri-o/cri-o/internal/config/apparmor"
 	"github.com/cri-o/cri-o/internal/config/blockio"
diff --git a/server/container_create_linux.go b/server/container_create_linux.go
index bd059f566b7..b2ed4942be4 100644
--- a/server/container_create_linux.go
+++ b/server/container_create_linux.go
@@ -10,8 +10,8 @@ import (
 	"github.com/containers/buildah/util"
 	"github.com/containers/common/pkg/subscriptions"
-	"github.com/containers/podman/v3/pkg/rootless"
-	selinux "github.com/containers/podman/v3/pkg/selinux"
+	"github.com/containers/podman/v4/pkg/rootless"
+	selinux "github.com/containers/podman/v4/pkg/selinux"
 	cstorage "github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
diff --git a/server/sandbox_run_linux.go b/server/sandbox_run_linux.go
index 2ff03847a44..d2dd96a6701 100644
--- a/server/sandbox_run_linux.go
+++ b/server/sandbox_run_linux.go
@@ -10,9 +10,9 @@ import (
 	cnitypes "github.com/containernetworking/cni/pkg/types"
 	current "github.com/containernetworking/cni/pkg/types/100"
-	"github.com/containers/podman/v3/pkg/annotations"
-	"github.com/containers/podman/v3/pkg/rootless"
-	selinux "github.com/containers/podman/v3/pkg/selinux"
+	"github.com/containers/podman/v4/pkg/annotations"
+	"github.com/containers/podman/v4/pkg/rootless"
+	selinux "github.com/containers/podman/v4/pkg/selinux"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/cri-o/cri-o/internal/config/node"
diff --git a/test/copyimg/copyimg.go b/test/copyimg/copyimg.go
index 3b8d7aff73a..69d61bb58cf 100644
--- a/test/copyimg/copyimg.go
+++ b/test/copyimg/copyimg.go
@@ -9,7 +9,7 @@ import (
 	"github.com/containers/image/v5/storage"
 	"github.com/containers/image/v5/transports/alltransports"
 	"github.com/containers/image/v5/types"
-	"github.com/containers/podman/v3/pkg/rootless"
+	"github.com/containers/podman/v4/pkg/rootless"
 	sstorage "github.com/containers/storage"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/cri-o/cri-o/internal/log"
diff --git a/test/mocks/containerstorage/containerstorage.go b/test/mocks/containerstorage/containerstorage.go
index ff0cd9139cd..6ba82870586 100644
--- a/test/mocks/containerstorage/containerstorage.go
+++ b/test/mocks/containerstorage/containerstorage.go
@@ -40,6 +40,20 @@ func (m *MockStore) EXPECT() *MockStoreMockRecorder {
 	return m.recorder
 }
 
+// AddNames mocks base method.
+func (m *MockStore) AddNames(arg0 string, arg1 []string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "AddNames", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// AddNames indicates an expected call of AddNames.
+func (mr *MockStoreMockRecorder) AddNames(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNames", reflect.TypeOf((*MockStore)(nil).AddNames), arg0, arg1)
+}
+
 // ApplyDiff mocks base method.
 func (m *MockStore) ApplyDiff(arg0 string, arg1 io.Reader) (int64, error) {
 	m.ctrl.T.Helper()
@@ -945,6 +959,20 @@ func (mr *MockStoreMockRecorder) PutLayer(arg0, arg1, arg2, arg3, arg4, arg5, ar
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutLayer", reflect.TypeOf((*MockStore)(nil).PutLayer), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
 }
 
+// RemoveNames mocks base method.
+func (m *MockStore) RemoveNames(arg0 string, arg1 []string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RemoveNames", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// RemoveNames indicates an expected call of RemoveNames.
+func (mr *MockStoreMockRecorder) RemoveNames(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveNames", reflect.TypeOf((*MockStore)(nil).RemoveNames), arg0, arg1)
+}
+
 // RunRoot mocks base method.
 func (m *MockStore) RunRoot() string {
 	m.ctrl.T.Helper()
func (m *MockStore) RunRoot() string { m.ctrl.T.Helper() diff --git a/utils/utils.go b/utils/utils.go index aa297a74e2d..afd2094c111 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/containers/podman/v3/pkg/lookup" + "github.com/containers/podman/v4/pkg/lookup" "github.com/cri-o/cri-o/internal/dbusmgr" "github.com/cri-o/cri-o/utils/cmdrunner" securejoin "github.com/cyphar/filepath-securejoin" diff --git a/utils/utils_test.go b/utils/utils_test.go index 6ebc1a5e09d..9d66c9c256e 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -6,7 +6,7 @@ import ( "path/filepath" "strings" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/pkg/rootless" "github.com/cri-o/cri-o/internal/dbusmgr" "github.com/cri-o/cri-o/utils" . "github.com/onsi/ginkgo/v2" diff --git a/vendor/capnproto.org/go/capnp/v3/capability.go b/vendor/capnproto.org/go/capnp/v3/capability.go index 3b42a97c8eb..cab6fa53a7b 100644 --- a/vendor/capnproto.org/go/capnp/v3/capability.go +++ b/vendor/capnproto.org/go/capnp/v3/capability.go @@ -292,12 +292,12 @@ func (c *Client) SendCall(ctx context.Context, s Send) (*Answer, ReleaseFunc) { } limiter := c.GetFlowLimiter() - var gotResponse func() // We need to call PlaceArgs before we will know the size of message for // flow control purposes, so wrap it in a function that measures after the // arguments have been placed: placeArgs := s.PlaceArgs + var size uint64 s.PlaceArgs = func(args Struct) error { var err error if placeArgs != nil { @@ -307,30 +307,48 @@ func (c *Client) SendCall(ctx context.Context, s Send) (*Answer, ReleaseFunc) { } } - var size uint64 size, err = args.Segment().Message().TotalSize() - if err != nil { - return err - } - - gotResponse, err = limiter.StartMessage(ctx, size) return err } - ans, err := h.Send(ctx, s) - if err == nil { - p := ans.f.promise - p.mu.Lock() - if p.isResolved() { - // Wow, that was fast. - p.mu.Unlock() - gotResponse() - } else { - p.signals = append(p.signals, gotResponse) - p.mu.Unlock() - } + ans, rel := h.Send(ctx, s) + // FIXME: an earlier version of this code called StartMessage() from + // within PlaceArgs -- but that can result in a deadlock, since it means + // the client hook is holding a lock while we're waiting on the limiter. + // + // As a temporary workaround, we instead do StartMessage *after* the send. + // This still has a bug, but a much less serious one: we may slightly + // over-use our limit, but only by the size of a single message. This is + // mostly a problem in that it contradicts the documentation and is + // conceptually odd. + // + // Longer term, we should fix a more serious design problem: Send() is + // holding a lock while calling into user code (PlaceArgs), so this + // deadlock could also arise if the user code blocks. Once that is solved, + // we can back out this hack. + gotResponse, err := limiter.StartMessage(ctx, size) + if err != nil { + // HACK: An error should only happen if the context was cancelled, + // in which case the caller will probably notice it soon. The call + // still went off ok, so we can just return the result we already + // got, and trying to report the error is awkward because we can't + // return one... so we don't. Set gotResponse to something that won't + // break things, and call it a day. See comments above about a + // longer term solution to this mess. + gotResponse = func() {} + } + p := ans.f.promise + p.mu.Lock() + if p.isResolved() { + // Wow, that was fast. 
+ p.mu.Unlock() + gotResponse() + } else { + p.signals = append(p.signals, gotResponse) + p.mu.Unlock() } - return ans, err + + return ans, rel } // RecvCall starts executing a method with the referenced arguments diff --git a/vendor/capnproto.org/go/capnp/v3/flowcontrol/fixed.go b/vendor/capnproto.org/go/capnp/v3/flowcontrol/fixed.go index cd53c68534a..f7805593cbe 100644 --- a/vendor/capnproto.org/go/capnp/v3/flowcontrol/fixed.go +++ b/vendor/capnproto.org/go/capnp/v3/flowcontrol/fixed.go @@ -2,108 +2,33 @@ package flowcontrol import ( "context" - "sync" - - "capnproto.org/go/capnp/v3/internal/chanmutex" + "fmt" + "golang.org/x/sync/semaphore" + "math" ) // Returns a FlowLimiter that enforces a fixed limit on the total size of // outstanding messages. -func NewFixedLimiter(size uint64) FlowLimiter { - return &fixedLimiter{ - total: size, - avail: size, - } +func NewFixedLimiter(size int64) FlowLimiter { + return (*fixedLimiter)(semaphore.NewWeighted(size)) } -type fixedLimiter struct { - mu sync.Mutex - total, avail uint64 - - pending requestQueue -} +type fixedLimiter semaphore.Weighted func (fl *fixedLimiter) StartMessage(ctx context.Context, size uint64) (gotResponse func(), err error) { - gotResponse = fl.makeCallback(size) - fl.mu.Lock() - ready := fl.pending.put(size) - fl.pumpQueue() - fl.mu.Unlock() - select { - case <-ready: - return gotResponse, nil - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// Unblock as many requests as the limit will allow. -// -// The caller must be holding fl.mu. -func (fl *fixedLimiter) pumpQueue() { - next := fl.pending.peek() - for next != nil && next.size <= fl.avail { - fl.avail -= next.size - fl.pending.next() // remove it from the queue - next.ready.Unlock() - next = fl.pending.peek() + if size > math.MaxInt64 { + // semaphore.Weighted expects an int64, so we need to check the bounds. + return nil, fmt.Errorf( + "StartMessage(): limit %v is too large (max %v)", + size, int64(math.MaxInt64), + ) + } + w := (*semaphore.Weighted)(fl) + err = w.Acquire(ctx, int64(size)) + if err != nil { + return nil, err } -} - -// Return a function which, when called, will return `size` bytes to the -// available pool. -func (fl *fixedLimiter) makeCallback(size uint64) func() { return func() { - fl.mu.Lock() - defer fl.mu.Unlock() - fl.avail += size - fl.pumpQueue() - } -} - -// A queue of requests to send. -type requestQueue struct { - head, tail *request -} - -// Node in the linked list that makes up requestQueue. -type request struct { - // Unlock this when ready: - ready chanmutex.Mutex - - size uint64 - next *request -} - -// Look at the first request in the queue, without dequeueing it. -func (q *requestQueue) peek() *request { - return q.head -} - -// Drop the first request from the queue. -func (q *requestQueue) next() { - if q.head != nil { - q.head = q.head.next - } - if q.head == nil { - q.tail = nil - } -} - -// Enqueue a request to send `size` bytes. Returns a locked mutex -// which will be unlocked when it is appropriate to send. 
-func (q *requestQueue) put(size uint64) chanmutex.Mutex { - req := &request{ - ready: chanmutex.NewLocked(), - size: size, - next: nil, - } - if q.tail == nil { - q.tail = req - q.head = req - } else { - q.tail.next = req - q.tail = req - } - return req.ready + w.Release(int64(size)) + }, nil } diff --git a/vendor/capnproto.org/go/capnp/v3/flowcontrol/flowlimiter.go b/vendor/capnproto.org/go/capnp/v3/flowcontrol/flowlimiter.go index fa7be0aaa6a..e26891f86a0 100644 --- a/vendor/capnproto.org/go/capnp/v3/flowcontrol/flowlimiter.go +++ b/vendor/capnproto.org/go/capnp/v3/flowcontrol/flowlimiter.go @@ -19,7 +19,7 @@ import ( // A `FlowLimiter` is used to manage flow control for a stream of messages. type FlowLimiter interface { - // StartMessage informs the flow limiter than the caller wants to + // StartMessage informs the flow limiter that the caller wants to // send a message of the specified size. It blocks until an appropriate // time to do so, or until the context is canceled. If the returned // error is nil, the caller should then proceed in sending the message diff --git a/vendor/capnproto.org/go/capnp/v3/rpc/rpc.go b/vendor/capnproto.org/go/capnp/v3/rpc/rpc.go index c3ba297e812..e744085e8b7 100644 --- a/vendor/capnproto.org/go/capnp/v3/rpc/rpc.go +++ b/vendor/capnproto.org/go/capnp/v3/rpc/rpc.go @@ -936,78 +936,89 @@ func (c *Conn) handleReturn(ctx context.Context, ret rpccp.Return, release capnp if pr.parseFailed { c.er.ReportError(rpcerr.Annotate(pr.err, "incoming return")) } - switch { - case q.bootstrapPromise != nil && pr.err == nil: - q.release = func() {} - syncutil.Without(&c.mu, func() { - q.p.Fulfill(pr.result) - q.bootstrapPromise.Fulfill(q.p.Answer().Client()) - q.p.ReleaseClients() - clearCapTable(pr.result.Message()) - release() - }) - case q.bootstrapPromise != nil && pr.err != nil: - // TODO(someday): send unimplemented message back to remote if - // pr.unimplemented == true. - q.release = func() {} - syncutil.Without(&c.mu, func() { - q.p.Reject(pr.err) - q.bootstrapPromise.Fulfill(q.p.Answer().Client()) - q.p.ReleaseClients() - release() - }) - case q.bootstrapPromise == nil && pr.err != nil: - // TODO(someday): send unimplemented message back to remote if - // pr.unimplemented == true. - q.release = func() {} - syncutil.Without(&c.mu, func() { - q.p.Reject(pr.err) - release() - }) - default: - m := ret.Message() - q.release = func() { - clearCapTable(m) - release() - } - syncutil.Without(&c.mu, func() { - q.p.Fulfill(pr.result) - }) - } - c.mu.Unlock() - // Send disembargoes. Failing to send one of these just never lifts - // the embargo on our side, but doesn't cause a leak. + // We're going to potentially block fulfilling some promises so fork + // off a goroutine to avoid blocking the receive loop. // - // TODO(soon): make embargo resolve to error client. - for _, s := range pr.disembargoes { - c.sendMessage(ctx, s.buildDisembargo, func(err error) { - err = fmt.Errorf("incoming return: send disembargo: %w", err) - c.er.ReportError(err) - }) - } - - // Send finish. - c.sendMessage(ctx, func(m rpccp.Message) error { - fin, err := m.NewFinish() - if err == nil { - fin.SetQuestionId(uint32(qid)) - fin.SetReleaseResultCaps(false) + // TODO(cleanup): This is a bit weird in that we hold the lock across + // the go statement, and do the unlock in the new goroutine, but before + // we actually block. This was less weird when the go statement wasn't + // there, and we should rework this so it's easier to understand what's + // going on. 
+ go func() { + switch { + case q.bootstrapPromise != nil && pr.err == nil: + q.release = func() {} + syncutil.Without(&c.mu, func() { + q.p.Fulfill(pr.result) + q.bootstrapPromise.Fulfill(q.p.Answer().Client()) + q.p.ReleaseClients() + clearCapTable(pr.result.Message()) + release() + }) + case q.bootstrapPromise != nil && pr.err != nil: + // TODO(someday): send unimplemented message back to remote if + // pr.unimplemented == true. + q.release = func() {} + syncutil.Without(&c.mu, func() { + q.p.Reject(pr.err) + q.bootstrapPromise.Fulfill(q.p.Answer().Client()) + q.p.ReleaseClients() + release() + }) + case q.bootstrapPromise == nil && pr.err != nil: + // TODO(someday): send unimplemented message back to remote if + // pr.unimplemented == true. + q.release = func() {} + syncutil.Without(&c.mu, func() { + q.p.Reject(pr.err) + release() + }) + default: + m := ret.Message() + q.release = func() { + clearCapTable(m) + release() + } + syncutil.Without(&c.mu, func() { + q.p.Fulfill(pr.result) + }) } - return err - }, func(err error) { - c.mu.Lock() - defer c.mu.Unlock() - defer close(q.finishMsgSend) + c.mu.Unlock() - if err != nil { - err = fmt.Errorf("incoming return: send finish: build message: %w", err) - c.er.ReportError(err) - } else { - q.flags |= finishSent - c.questionID.remove(uint32(qid)) + // Send disembargoes. Failing to send one of these just never lifts + // the embargo on our side, but doesn't cause a leak. + // + // TODO(soon): make embargo resolve to error client. + for _, s := range pr.disembargoes { + c.sendMessage(ctx, s.buildDisembargo, func(err error) { + err = fmt.Errorf("incoming return: send disembargo: %w", err) + c.er.ReportError(err) + }) } - }) + + // Send finish. + c.sendMessage(ctx, func(m rpccp.Message) error { + fin, err := m.NewFinish() + if err == nil { + fin.SetQuestionId(uint32(qid)) + fin.SetReleaseResultCaps(false) + } + return err + }, func(err error) { + c.mu.Lock() + defer c.mu.Unlock() + defer close(q.finishMsgSend) + + if err != nil { + err = fmt.Errorf("incoming return: send finish: build message: %w", err) + c.er.ReportError(err) + } else { + q.flags |= finishSent + c.questionID.remove(uint32(qid)) + } + }) + }() return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0d2eb45b8e1..0952d8c7392 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -74,6 +74,15 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) } + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + // And for DefaultMode. 
selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} if config.AEAD().Mode() != packet.AEADModeEAX { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index c3fedf7a250..44c6ebf0ddd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -37,15 +37,17 @@ type Identity struct { Name string // by convention, has the form "Full Name (comment) " UserId *packet.UserId SelfSignature *packet.Signature - Signatures []*packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures } // A Subkey is an additional public key in an Entity. Subkeys can be used for // encryption. type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature } // A Key identifies a specific public key in an Entity. This is either the @@ -55,6 +57,7 @@ type Key struct { PublicKey *packet.PublicKey PrivateKey *packet.PrivateKey SelfSignature *packet.Signature + Revocations []*packet.Signature } // A KeyRing provides access to public and private keys. @@ -71,28 +74,58 @@ type KeyRing interface { DecryptionKeys() []Key } -// PrimaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order. func (e *Entity) PrimaryIdentity() *Identity { - var firstIdentity *Identity + var primaryIdentity *Identity for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident + if shouldPreferIdentity(primaryIdentity, ident) { + primaryIdentity = ident } } - return firstIdentity + return primaryIdentity +} + +func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { + if (existingId == nil) { + return true + } + + if (len(existingId.Revocations) > len(potentialNewId.Revocations)) { + return true + } + + if (len(existingId.Revocations) < len(potentialNewId.Revocations)) { + return false + } + + if existingId.SelfSignature == nil { + return true + } + + if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { + return false + } + + if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + return true + } + + return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) } // EncryptionKey returns the best candidate Key for encrypting a message to the // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { - // Fail to find any encryption key if the primary key has expired. + // Fail to find any encryption key if the... 
i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -104,6 +137,8 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { subkey.Sig.FlagEncryptCommunications && subkey.PublicKey.PubKeyAlgo.CanEncrypt() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { candidateSubkey = i maxTime = subkey.Sig.CreationTime @@ -112,17 +147,16 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we don't have any candidate subkeys for encryption and // the primary key doesn't have any usage metadata then we // assume that the primary key is ok. Or, if the primary key is // marked as ok to encrypt with, then we can obviously use it. - // Also, check expiry again just to be safe. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } return Key{}, false @@ -137,10 +171,13 @@ func (e *Entity) SigningKey(now time.Time) (Key, bool) { // SigningKeyById return the Key for signing a message with this // Entity and keyID. func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { - // Fail to find any signing key if the primary key has expired. + // Fail to find any signing key if the... i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked return Key{}, false } @@ -152,8 +189,10 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { subkey.Sig.FlagSign && subkey.PublicKey.PubKeyAlgo.CanSign() && !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && - (id == 0 || subkey.PrivateKey.KeyId == id) { + (id == 0 || subkey.PublicKey.KeyId == id) { candidateSubkey = idx maxTime = subkey.Sig.CreationTime } @@ -161,22 +200,63 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { if candidateSubkey != -1 { subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true } // If we have no candidate subkey then we assume that it's ok to sign // with the primary key. 
Or, if the primary key is marked as ok to - // sign with, then we can use it. Also, check expiry again just to be safe. + // sign with, then we can use it. if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired && - (id == 0 || e.PrivateKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + e.PrimaryKey.PubKeyAlgo.CanSign() && + (id == 0 || e.PrimaryKey.KeyId == id) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true } // No keys with a valid Signing Flag or no keys matched the id passed in return Key{}, false } +func revoked(revocations []*packet.Signature, now time.Time) bool { + for _, revocation := range revocations { + if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { + // If the key is compromised, the key is considered revoked even before the revocation date. + return true + } + if !revocation.SigExpired(now) { + return true + } + } + return false +} + +// Revoked returns whether the entity has any direct key revocation signatures. +// Note that third-party revocation signatures are not supported. +// Note also that Identity and Subkey revocation should be checked separately. +func (e *Entity) Revoked(now time.Time) bool { + return revoked(e.Revocations, now) +} + +// Revoked returns whether the identity has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (i *Identity) Revoked(now time.Time) bool { + return revoked(i.Revocations, now) +} + +// Revoked returns whether the subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (s *Subkey) Revoked(now time.Time) bool { + return revoked(s.Revocations, now) +} + +// Revoked returns whether the key or subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +// Note also that Identity revocation should be checked separately. +// Normally, it's not necessary to call this function, except on keys returned by +// KeysById or KeysByIdUsage. +func (key *Key) Revoked(now time.Time) bool { + return revoked(key.Revocations, now) +} + // An EntityList contains one or more Entities. type EntityList []*Entity @@ -184,21 +264,14 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + ident := e.PrimaryIdentity() + selfSig := ident.SelfSignature + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } for _, subKey := range e.Subkeys { if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -210,15 +283,7 @@ func (el EntityList) KeysById(id uint64) (keys []Key) { // the bitwise-OR of packet.KeyFlag* values. 
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { + if key.SelfSignature != nil && key.SelfSignature.FlagsValid && requiredUsage != 0 { var usage byte if key.SelfSignature.FlagCertify { usage |= packet.KeyFlagCertify @@ -247,7 +312,7 @@ func (el EntityList) DecryptionKeys() (keys []Key) { for _, e := range el { for _, subKey := range e.Subkeys { if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) } } } @@ -442,11 +507,22 @@ func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { break } - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { return errors.StructuralError("user ID self-signature invalid: " + err.Error()) } - if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + identity.SelfSignature = sig } identity.Signatures = append(identity.Signatures, sig) @@ -488,9 +564,9 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p switch sig.SigType { case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig + subKey.Revocations = append(subKey.Revocations, sig) case packet.SigTypeSubkeyBinding: - if shouldReplaceSubkeySig(subKey.Sig, sig) { + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { subKey.Sig = sig } } @@ -505,22 +581,6 @@ func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *p return nil } -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - // SerializePrivate serializes an Entity, including private key material, but // excluding signatures from other entities, to the given Writer. // Identities and subkeys are re-signed in case they changed since NewEntity. 
@@ -549,20 +609,37 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo if err != nil { return } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { return } if reSign { + if ident.SelfSignature == nil { + return goerrors.New("openpgp: can't re-sign identity without valid self-signature") + } err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) if err != nil { return } } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return + for _, revocation := range ident.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + if ident.SelfSignature != nil { + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } } } for _, subkey := range e.Subkeys { @@ -583,6 +660,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo } } } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return @@ -598,6 +681,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -615,6 +704,12 @@ func (e *Entity) Serialize(w io.Writer) error { if err != nil { return err } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } err = subkey.Sig.Serialize(w) if err != nil { return err @@ -659,14 +754,13 @@ func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Co // specified reason code and text (RFC4880 section-5.2.3.23). // If config is nil, sensible defaults will be used. 
func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeKeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } @@ -686,22 +780,21 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea return errors.InvalidArgumentError("given subkey is not associated with this key") } - reasonCode := uint8(reason) revSig := &packet.Signature{ Version: e.PrimaryKey.Version, CreationTime: config.Now(), SigType: packet.SigTypeSubkeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, Hash: config.Hash(), - RevocationReason: &reasonCode, + RevocationReason: &reason, RevocationReasonText: reasonText, IssuerKeyId: &e.PrimaryKey.KeyId, } - if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { return err } - sk.Sig = revSig + sk.Revocations = append(sk.Revocations, revSig) return nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go index 21d17c2f8be..a2219e64d1c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -138,6 +138,69 @@ heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB =IKnw -----END PGP PUBLIC KEY BLOCK-----` +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW +IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb 
+YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR @@ -304,33 +367,34 @@ X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE -FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL -CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl -1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo -aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy -OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 -/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA -g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla -d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP -2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l -nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX -nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX -SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 -rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA -0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ -wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa -LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV -8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz -j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy -AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 -1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ -QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp -f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn 
-3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK -2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA -ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi -f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj -5KjhX2PVNEJd3XZRzaXZE2aAMQ== -=522n +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT -----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go index fe0da9d3685..65203813778 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -395,6 +395,7 @@ const ( SigTypeDirectSignature = 0x1F SigTypeKeyRevocation = 0x20 SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 ) // PublicKeyAlgorithm represents the different public key system specified for diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index 28971c1ada2..297dc40ba8c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -735,13 +735,13 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { } // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, -// made by the passed in signingKey. 
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) +// made by this public key, of signed. +func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) if err != nil { return err } - return signingKey.VerifySignature(h, sig) + return pk.VerifySignature(h, sig) } // userIdSignatureHash returns a Hash of the message that needs to be signed diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 6b1704e4292..86602d02904 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -28,6 +28,10 @@ const ( KeyFlagSign KeyFlagEncryptCommunications KeyFlagEncryptStorage + KeyFlagSplitKey + KeyFlagAuthenticate + _ + KeyFlagGroupKey ) // Signature represents a signature. See RFC 4880, section 5.2. @@ -68,14 +72,19 @@ type Signature struct { IssuerFingerprint []byte IsPrimaryId *bool + // PolicyURI can be set to the URI of a document that describes the + // policy under which the signature was issued. See RFC 4880, section + // 5.2.3.20 for details. + PolicyURI string + // FlagsValid is set if any flags were given. See RFC 4880, section // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool // RevocationReason is set if this signature has been revoked. // See RFC 4880, section 5.2.3.23 for details. 
- RevocationReason *uint8 + RevocationReason *ReasonForRevocation RevocationReasonText string // In a self-signature, these flags are set if there is a features subpacket @@ -218,6 +227,7 @@ const ( prefHashAlgosSubpacket signatureSubpacketType = 21 prefCompressionSubpacket signatureSubpacketType = 22 primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 keyFlagsSubpacket signatureSubpacketType = 27 reasonForRevocationSubpacket signatureSubpacketType = 29 featuresSubpacket signatureSubpacketType = 30 @@ -368,6 +378,15 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if subpacket[0]&KeyFlagEncryptStorage != 0 { sig.FlagEncryptStorage = true } + if subpacket[0]&KeyFlagSplitKey != 0 { + sig.FlagSplitKey = true + } + if subpacket[0]&KeyFlagAuthenticate != 0 { + sig.FlagAuthenticate = true + } + if subpacket[0]&KeyFlagGroupKey != 0 { + sig.FlagGroupKey = true + } case reasonForRevocationSubpacket: // Reason For Revocation, section 5.2.3.23 if !isHashed { @@ -377,8 +396,8 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r err = errors.StructuralError("empty revocation reason subpacket") return } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = ReasonForRevocation(subpacket[0]) sig.RevocationReasonText = string(subpacket[1:]) case featuresSubpacket: // Features subpacket, section 5.2.3.24 specifies a very general @@ -416,6 +435,12 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) } + case policyUriSubpacket: + // Policy URI, section 5.2.3.20 + if !isHashed { + return + } + sig.PolicyURI = string(subpacket) case issuerFingerprintSubpacket: v, l := subpacket[0], len(subpacket[1:]) if v == 5 && l != 32 || v != 5 && l != 20 { @@ -507,6 +532,9 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { if subpacket.hashed == hashed { n := serializeSubpacketLength(to, len(subpacket.contents)+1) to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } to = to[1+n:] n = copy(to, subpacket.contents) to = to[n:] @@ -714,6 +742,14 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config return sig.Sign(h, priv, config) } +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + // Serialize marshals sig to w. Sign, SignUserId or SignKey must have been // called first. func (sig *Signature) Serialize(w io.Writer) (err error) { @@ -826,7 +862,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } if sig.IssuerFingerprint != nil { contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) 
- subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, true, contents}) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) } if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { sigLifetime := make([]byte, 4) @@ -850,6 +886,15 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.FlagEncryptStorage { flags |= KeyFlagEncryptStorage } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) } @@ -892,6 +937,10 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) } + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + if len(sig.PreferredAEAD) > 0 { subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) } @@ -899,7 +948,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. if sig.RevocationReason != nil { subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) } // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
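The keys.go and signature.go changes above rework revocation handling: revocation signatures now accumulate in dedicated Revocations slices (on Entity, Identity, Subkey, and Key) instead of overwriting Subkey.Sig, RevocationReason becomes a typed *packet.ReasonForRevocation, and the new Signature.RevokeSubkey computes the revocation over the key-signature hash that VerifySubkeyRevocationSignature now checks. A minimal sketch of how a caller might exercise the revised API; the "alice" identity and the choice of the first generated subkey are illustrative assumptions, not part of the diff:

package main

import (
	"fmt"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Hypothetical key, for illustration only; NewEntity creates a
	// primary signing key plus one encryption subkey.
	entity, err := openpgp.NewEntity("alice", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	sub := &entity.Subkeys[0]

	// After the change above, the revocation signature is appended to
	// sub.Revocations rather than clobbering the binding signature in
	// sub.Sig, so the subkey's signature history stays intact.
	if err := entity.RevokeSubkey(sub, packet.KeyCompromised, "key material leaked", nil); err != nil {
		panic(err)
	}

	// KeyCompromised revocations count as revoked regardless of the
	// revocation date, per the revoked() helper added in keys.go.
	fmt.Println(sub.Revoked(time.Now())) // true
}

Keeping the binding signature alongside its revocations is what lets EncryptionKey and SigningKeyById skip revoked subkeys while Serialize and serializePrivate still write out the full signature history.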
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index d6bea7d4acc..614fbafd5e1 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -156,5 +156,12 @@ func parseUserId(id string) (name, comment, email string) { name = strings.TrimSpace(id[n.start:n.end]) comment = strings.TrimSpace(id[c.start:c.end]) email = strings.TrimSpace(id[e.start:e.end]) + + // RFC 2822 3.4: alternate simple form of a mailbox + if email == "" && strings.ContainsRune(name, '@') { + email = name + name = "" + } + return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index a649ebbab39..acb7cc6e29f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -370,11 +370,25 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // If signature KeyID matches if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { - scr.md.Signature = sig - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { - scr.md.SignatureError = errors.ErrSignatureExpired + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + now := scr.config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + signatureError = errors.ErrKeyRevoked + } + if sig.SigExpired(now) { + signatureError = errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { + signatureError = errors.ErrKeyExpired + } } + scr.md.Signature = sig + scr.md.SignatureError = signatureError } else { scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) } @@ -483,10 +497,16 @@ func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, err = key.PublicKey.VerifySignature(h, sig) if err == nil { now := config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + return key.Entity, errors.ErrKeyRevoked + } if sig.SigExpired(now) { return key.Entity, errors.ErrSignatureExpired } - if key.PublicKey.KeyExpired(key.SelfSignature, now) { + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { return key.Entity, errors.ErrKeyExpired } return key.Entity, nil diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index bb74b13872a..23a24f20305 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -208,6 +208,15 @@ func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signe return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) } +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. 
The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + // writeAndSign writes the data as a payload package and, optionally, signs // it. hints contains optional information, that is also encrypted, // that aids the recipients in processing the message. The resulting @@ -358,7 +367,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En var ok bool encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } sig := to[i].PrimaryIdentity().SelfSignature @@ -416,7 +425,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } var payload io.WriteCloser - if aeadSupported { + if config.AEAD() != nil && aeadSupported { payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) if err != nil { return diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go index 7c59ed23f0e..3b1abd928e8 100644 --- a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go +++ b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go @@ -48,12 +48,16 @@ const ( // kubelet archive CheckpointedPodsFile = "checkpointed.pods" // container archive - ConfigDumpFile = "config.dump" - SpecDumpFile = "spec.dump" - NetworkStatusFile = "network.status" - CheckpointDirectory = "checkpoint" - RootFsDiffTar = "rootfs-diff.tar" - DeletedFilesFile = "deleted.files" + ConfigDumpFile = "config.dump" + SpecDumpFile = "spec.dump" + NetworkStatusFile = "network.status" + CheckpointDirectory = "checkpoint" + CheckpointVolumesDirectory = "volumes" + DevShmCheckpointTar = "devshm-checkpoint.tar" + RootFsDiffTar = "rootfs-diff.tar" + DeletedFilesFile = "deleted.files" + DumpLogFile = "dump.log" + RestoreLogFile = "restore.log" // pod archive PodOptionsFile = "pod.options" PodDumpFile = "pod.dump" diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/magic/types.go b/vendor/github.com/checkpoint-restore/go-criu/v5/magic/types.go new file mode 100644 index 00000000000..24cc3989a68 --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/magic/types.go @@ -0,0 +1,12 @@ +package magic + +const ( + ImgCommonMagic = 0x54564319 /* Sarov (a.k.a. Arzamas-16) */ + ImgServiceMagic = 0x55105940 /* Zlatoust */ + StatsMagic = 0x57093306 /* Ostashkov */ + + PrimaryMagicOffset = 0x0 + SecondaryMagicOffset = 0x4 + SizeOffset = 0x8 + PayloadOffset = 0xC +) diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/stats/stats.pb.go b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/stats.pb.go new file mode 100644 index 00000000000..ff011fc2c50 --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/stats.pb.go @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: MIT + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: stats/stats.proto + +package stats + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This one contains statistics about dump/restore process +type DumpStatsEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FreezingTime *uint32 `protobuf:"varint,1,req,name=freezing_time,json=freezingTime" json:"freezing_time,omitempty"` + FrozenTime *uint32 `protobuf:"varint,2,req,name=frozen_time,json=frozenTime" json:"frozen_time,omitempty"` + MemdumpTime *uint32 `protobuf:"varint,3,req,name=memdump_time,json=memdumpTime" json:"memdump_time,omitempty"` + MemwriteTime *uint32 `protobuf:"varint,4,req,name=memwrite_time,json=memwriteTime" json:"memwrite_time,omitempty"` + PagesScanned *uint64 `protobuf:"varint,5,req,name=pages_scanned,json=pagesScanned" json:"pages_scanned,omitempty"` + PagesSkippedParent *uint64 `protobuf:"varint,6,req,name=pages_skipped_parent,json=pagesSkippedParent" json:"pages_skipped_parent,omitempty"` + PagesWritten *uint64 `protobuf:"varint,7,req,name=pages_written,json=pagesWritten" json:"pages_written,omitempty"` + IrmapResolve *uint32 `protobuf:"varint,8,opt,name=irmap_resolve,json=irmapResolve" json:"irmap_resolve,omitempty"` + PagesLazy *uint64 `protobuf:"varint,9,req,name=pages_lazy,json=pagesLazy" json:"pages_lazy,omitempty"` + PagePipes *uint64 `protobuf:"varint,10,opt,name=page_pipes,json=pagePipes" json:"page_pipes,omitempty"` + PagePipeBufs *uint64 `protobuf:"varint,11,opt,name=page_pipe_bufs,json=pagePipeBufs" json:"page_pipe_bufs,omitempty"` + ShpagesScanned *uint64 `protobuf:"varint,12,opt,name=shpages_scanned,json=shpagesScanned" json:"shpages_scanned,omitempty"` + ShpagesSkippedParent *uint64 `protobuf:"varint,13,opt,name=shpages_skipped_parent,json=shpagesSkippedParent" json:"shpages_skipped_parent,omitempty"` + ShpagesWritten *uint64 `protobuf:"varint,14,opt,name=shpages_written,json=shpagesWritten" json:"shpages_written,omitempty"` +} + +func (x *DumpStatsEntry) Reset() { + *x = DumpStatsEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_stats_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DumpStatsEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DumpStatsEntry) ProtoMessage() {} + +func (x *DumpStatsEntry) ProtoReflect() protoreflect.Message { + mi := &file_stats_stats_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DumpStatsEntry.ProtoReflect.Descriptor instead. 
+func (*DumpStatsEntry) Descriptor() ([]byte, []int) { + return file_stats_stats_proto_rawDescGZIP(), []int{0} +} + +func (x *DumpStatsEntry) GetFreezingTime() uint32 { + if x != nil && x.FreezingTime != nil { + return *x.FreezingTime + } + return 0 +} + +func (x *DumpStatsEntry) GetFrozenTime() uint32 { + if x != nil && x.FrozenTime != nil { + return *x.FrozenTime + } + return 0 +} + +func (x *DumpStatsEntry) GetMemdumpTime() uint32 { + if x != nil && x.MemdumpTime != nil { + return *x.MemdumpTime + } + return 0 +} + +func (x *DumpStatsEntry) GetMemwriteTime() uint32 { + if x != nil && x.MemwriteTime != nil { + return *x.MemwriteTime + } + return 0 +} + +func (x *DumpStatsEntry) GetPagesScanned() uint64 { + if x != nil && x.PagesScanned != nil { + return *x.PagesScanned + } + return 0 +} + +func (x *DumpStatsEntry) GetPagesSkippedParent() uint64 { + if x != nil && x.PagesSkippedParent != nil { + return *x.PagesSkippedParent + } + return 0 +} + +func (x *DumpStatsEntry) GetPagesWritten() uint64 { + if x != nil && x.PagesWritten != nil { + return *x.PagesWritten + } + return 0 +} + +func (x *DumpStatsEntry) GetIrmapResolve() uint32 { + if x != nil && x.IrmapResolve != nil { + return *x.IrmapResolve + } + return 0 +} + +func (x *DumpStatsEntry) GetPagesLazy() uint64 { + if x != nil && x.PagesLazy != nil { + return *x.PagesLazy + } + return 0 +} + +func (x *DumpStatsEntry) GetPagePipes() uint64 { + if x != nil && x.PagePipes != nil { + return *x.PagePipes + } + return 0 +} + +func (x *DumpStatsEntry) GetPagePipeBufs() uint64 { + if x != nil && x.PagePipeBufs != nil { + return *x.PagePipeBufs + } + return 0 +} + +func (x *DumpStatsEntry) GetShpagesScanned() uint64 { + if x != nil && x.ShpagesScanned != nil { + return *x.ShpagesScanned + } + return 0 +} + +func (x *DumpStatsEntry) GetShpagesSkippedParent() uint64 { + if x != nil && x.ShpagesSkippedParent != nil { + return *x.ShpagesSkippedParent + } + return 0 +} + +func (x *DumpStatsEntry) GetShpagesWritten() uint64 { + if x != nil && x.ShpagesWritten != nil { + return *x.ShpagesWritten + } + return 0 +} + +type RestoreStatsEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PagesCompared *uint64 `protobuf:"varint,1,req,name=pages_compared,json=pagesCompared" json:"pages_compared,omitempty"` + PagesSkippedCow *uint64 `protobuf:"varint,2,req,name=pages_skipped_cow,json=pagesSkippedCow" json:"pages_skipped_cow,omitempty"` + ForkingTime *uint32 `protobuf:"varint,3,req,name=forking_time,json=forkingTime" json:"forking_time,omitempty"` + RestoreTime *uint32 `protobuf:"varint,4,req,name=restore_time,json=restoreTime" json:"restore_time,omitempty"` + PagesRestored *uint64 `protobuf:"varint,5,opt,name=pages_restored,json=pagesRestored" json:"pages_restored,omitempty"` +} + +func (x *RestoreStatsEntry) Reset() { + *x = RestoreStatsEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_stats_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreStatsEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreStatsEntry) ProtoMessage() {} + +func (x *RestoreStatsEntry) ProtoReflect() protoreflect.Message { + mi := &file_stats_stats_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
RestoreStatsEntry.ProtoReflect.Descriptor instead. +func (*RestoreStatsEntry) Descriptor() ([]byte, []int) { + return file_stats_stats_proto_rawDescGZIP(), []int{1} +} + +func (x *RestoreStatsEntry) GetPagesCompared() uint64 { + if x != nil && x.PagesCompared != nil { + return *x.PagesCompared + } + return 0 +} + +func (x *RestoreStatsEntry) GetPagesSkippedCow() uint64 { + if x != nil && x.PagesSkippedCow != nil { + return *x.PagesSkippedCow + } + return 0 +} + +func (x *RestoreStatsEntry) GetForkingTime() uint32 { + if x != nil && x.ForkingTime != nil { + return *x.ForkingTime + } + return 0 +} + +func (x *RestoreStatsEntry) GetRestoreTime() uint32 { + if x != nil && x.RestoreTime != nil { + return *x.RestoreTime + } + return 0 +} + +func (x *RestoreStatsEntry) GetPagesRestored() uint64 { + if x != nil && x.PagesRestored != nil { + return *x.PagesRestored + } + return 0 +} + +type StatsEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dump *DumpStatsEntry `protobuf:"bytes,1,opt,name=dump" json:"dump,omitempty"` + Restore *RestoreStatsEntry `protobuf:"bytes,2,opt,name=restore" json:"restore,omitempty"` +} + +func (x *StatsEntry) Reset() { + *x = StatsEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_stats_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsEntry) ProtoMessage() {} + +func (x *StatsEntry) ProtoReflect() protoreflect.Message { + mi := &file_stats_stats_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsEntry.ProtoReflect.Descriptor instead. 
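A practical property of the generated code: every getter guards a nil receiver, and unset proto2 fields read back as zero values, so callers can chain through StatsEntry (defined just below) without checking intermediate messages. A sketch, with the function name hypothetical:

package stats

// examplePagesWritten returns 0 rather than panicking when entry is nil,
// when entry.Dump is nil, or when pages_written was never set.
func examplePagesWritten(entry *StatsEntry) uint64 {
	return entry.GetDump().GetPagesWritten()
}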
+func (*StatsEntry) Descriptor() ([]byte, []int) { + return file_stats_stats_proto_rawDescGZIP(), []int{2} +} + +func (x *StatsEntry) GetDump() *DumpStatsEntry { + if x != nil { + return x.Dump + } + return nil +} + +func (x *StatsEntry) GetRestore() *RestoreStatsEntry { + if x != nil { + return x.Restore + } + return nil +} + +var File_stats_stats_proto protoreflect.FileDescriptor + +var file_stats_stats_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x04, 0x0a, 0x10, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x65, 0x65, + 0x7a, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0d, 0x52, + 0x0c, 0x66, 0x72, 0x65, 0x65, 0x7a, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, + 0x28, 0x0d, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x6d, 0x65, 0x6d, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x02, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x64, 0x75, 0x6d, 0x70, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x02, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, + 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, 0x02, 0x28, 0x04, 0x52, 0x0c, 0x70, + 0x61, 0x67, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x70, + 0x61, 0x67, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x02, 0x28, 0x04, 0x52, 0x12, 0x70, 0x61, 0x67, 0x65, 0x73, + 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x07, + 0x20, 0x02, 0x28, 0x04, 0x52, 0x0c, 0x70, 0x61, 0x67, 0x65, 0x73, 0x57, 0x72, 0x69, 0x74, 0x74, + 0x65, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x6c, 0x76, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x69, 0x72, 0x6d, 0x61, 0x70, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x73, + 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x09, 0x20, 0x02, 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x73, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x70, + 0x69, 0x70, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x50, 0x69, 0x70, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x69, + 0x70, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x70, + 0x61, 0x67, 0x65, 0x50, 0x69, 0x70, 0x65, 0x42, 0x75, 0x66, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x73, + 0x68, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x68, 0x70, 0x61, 0x67, 0x65, 0x73, 0x53, 0x63, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x68, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, + 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 
0x74, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x68, 0x70, 0x61, 0x67, 0x65, 0x73, 0x53, 0x6b, 0x69, + 0x70, 0x70, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x68, + 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x68, 0x70, 0x61, 0x67, 0x65, 0x73, 0x57, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x22, 0xd5, 0x01, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x61, 0x67, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x02, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x61, 0x67, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, + 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x02, 0x28, 0x04, 0x52, 0x0f, 0x70, + 0x61, 0x67, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x77, 0x12, 0x21, + 0x0a, 0x0c, 0x66, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x02, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x02, 0x28, 0x0d, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x61, + 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x22, 0x64, 0x0a, 0x0b, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x04, 0x64, 0x75, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x64, 0x75, 0x6d, 0x70, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x75, 0x6d, + 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x73, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, +} + +var ( + file_stats_stats_proto_rawDescOnce sync.Once + file_stats_stats_proto_rawDescData = file_stats_stats_proto_rawDesc +) + +func file_stats_stats_proto_rawDescGZIP() []byte { + file_stats_stats_proto_rawDescOnce.Do(func() { + file_stats_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_stats_stats_proto_rawDescData) + }) + return file_stats_stats_proto_rawDescData +} + +var file_stats_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_stats_stats_proto_goTypes = []interface{}{ + (*DumpStatsEntry)(nil), // 0: dump_stats_entry + (*RestoreStatsEntry)(nil), // 1: restore_stats_entry + (*StatsEntry)(nil), // 2: stats_entry +} +var file_stats_stats_proto_depIdxs = []int32{ + 0, // 0: stats_entry.dump:type_name -> dump_stats_entry + 1, // 1: stats_entry.restore:type_name -> restore_stats_entry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_stats_stats_proto_init() } +func 
file_stats_stats_proto_init() { + if File_stats_stats_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_stats_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DumpStatsEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_stats_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreStatsEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_stats_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_stats_stats_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_stats_stats_proto_goTypes, + DependencyIndexes: file_stats_stats_proto_depIdxs, + MessageInfos: file_stats_stats_proto_msgTypes, + }.Build() + File_stats_stats_proto = out.File + file_stats_stats_proto_rawDesc = nil + file_stats_stats_proto_goTypes = nil + file_stats_stats_proto_depIdxs = nil +} diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/stats/stats.proto b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/stats.proto new file mode 100644 index 00000000000..64e46181dad --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/stats.proto @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: MIT + +syntax = "proto2"; + +// This one contains statistics about dump/restore process +message dump_stats_entry { + required uint32 freezing_time = 1; + required uint32 frozen_time = 2; + required uint32 memdump_time = 3; + required uint32 memwrite_time = 4; + + required uint64 pages_scanned = 5; + required uint64 pages_skipped_parent = 6; + required uint64 pages_written = 7; + + optional uint32 irmap_resolve = 8; + + required uint64 pages_lazy = 9; + optional uint64 page_pipes = 10; + optional uint64 page_pipe_bufs = 11; + + optional uint64 shpages_scanned = 12; + optional uint64 shpages_skipped_parent = 13; + optional uint64 shpages_written = 14; +} + +message restore_stats_entry { + required uint64 pages_compared = 1; + required uint64 pages_skipped_cow = 2; + + required uint32 forking_time = 3; + required uint32 restore_time = 4; + + optional uint64 pages_restored = 5; +} + +message stats_entry { + optional dump_stats_entry dump = 1; + optional restore_stats_entry restore = 2; +} diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/stats/types.go b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/types.go new file mode 100644 index 00000000000..9044ad97624 --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/types.go @@ -0,0 +1,6 @@ +package stats + +const ( + StatsDump = "stats-dump" + StatsRestore = "stats-restore" +) diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/stats/utils.go b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/utils.go new file mode 100644 index 00000000000..368b2039ef8 --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/stats/utils.go @@ -0,0 +1,54 @@ +package stats + +import ( + "encoding/binary" + "errors" + "io/ioutil" + "os" + "path/filepath" 
+ + "github.com/checkpoint-restore/go-criu/v5/magic" + "google.golang.org/protobuf/proto" +) + +func readStatisticsFile(imgDir *os.File, fileName string) (*StatsEntry, error) { + buf, err := ioutil.ReadFile(filepath.Join(imgDir.Name(), fileName)) + if err != nil { + return nil, err + } + + if binary.LittleEndian.Uint32(buf[magic.PrimaryMagicOffset:magic.SecondaryMagicOffset]) != magic.ImgServiceMagic { + return nil, errors.New("Primary magic not found") + } + + if binary.LittleEndian.Uint32(buf[magic.SecondaryMagicOffset:magic.SizeOffset]) != magic.StatsMagic { + return nil, errors.New("Secondary magic not found") + } + + payloadSize := binary.LittleEndian.Uint32(buf[magic.SizeOffset:magic.PayloadOffset]) + + st := &StatsEntry{} + if err := proto.Unmarshal(buf[magic.PayloadOffset:magic.PayloadOffset+payloadSize], st); err != nil { + return nil, err + } + + return st, nil +} + +func CriuGetDumpStats(imgDir *os.File) (*DumpStatsEntry, error) { + st, err := readStatisticsFile(imgDir, StatsDump) + if err != nil { + return nil, err + } + + return st.GetDump(), nil +} + +func CriuGetRestoreStats(imgDir *os.File) (*RestoreStatsEntry, error) { + st, err := readStatisticsFile(imgDir, StatsRestore) + if err != nil { + return nil, err + } + + return st.GetRestore(), nil +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/devices.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/devices.go deleted file mode 100644 index 50e6353cc39..00000000000 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/devices.go +++ /dev/null @@ -1,181 +0,0 @@ -package pkg - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - cdispec "github.com/container-orchestrated-devices/container-device-interface/specs-go" - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - root = "/etc/cdi" -) - -func collectCDISpecs() (map[string]*cdispec.Spec, error) { - var files []string - vendor := make(map[string]*cdispec.Spec) - - err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if info == nil || info.IsDir() { - return nil - } - - if filepath.Ext(path) != ".json" { - return nil - } - - files = append(files, path) - return nil - }) - - if err != nil { - return nil, err - } - - for _, path := range files { - spec, err := loadCDIFile(path) - if err != nil { - continue - } - - if _, ok := vendor[spec.Kind]; ok { - continue - } - - vendor[spec.Kind] = spec - } - - return vendor, nil -} - -// TODO: Validate (e.g: duplicate device names) -func loadCDIFile(path string) (*cdispec.Spec, error) { - file, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - var spec *cdispec.Spec - err = json.Unmarshal([]byte(file), &spec) - if err != nil { - return nil, err - } - - return spec, nil -} - -/* -* Pattern "vendor.com/device=myDevice" with the vendor being optional - */ -func extractVendor(dev string) (string, string) { - if strings.IndexByte(dev, '=') == -1 { - return "", dev - } - - split := strings.SplitN(dev, "=", 2) - return split[0], split[1] -} - -// GetCDIForDevice returns the CDI specification that matches the device name the user provided. 
-func GetCDIForDevice(dev string, specs map[string]*cdispec.Spec) (*cdispec.Spec, error) { - vendor, device := extractVendor(dev) - - if vendor != "" { - s, ok := specs[vendor] - if !ok { - return nil, fmt.Errorf("Could not find vendor %q for device %q", vendor, device) - } - - for _, d := range s.Devices { - if d.Name != device { - continue - } - - return s, nil - } - - return nil, fmt.Errorf("Could not find device %q for vendor %q", device, vendor) - } - - var found []*cdispec.Spec - var vendors []string - for vendor, spec := range specs { - - for _, d := range spec.Devices { - if d.Name != device { - continue - } - - found = append(found, spec) - vendors = append(vendors, vendor) - } - } - - if len(found) > 1 { - return nil, fmt.Errorf("%q is ambiguous and currently refers to multiple devices from different vendors: %q", dev, vendors) - } - - if len(found) == 1 { - return found[0], nil - } - - return nil, fmt.Errorf("Could not find device %q", dev) -} - -// HasDevice returns true if a device is a CDI device -// an error may be returned in cases where permissions may be required -func HasDevice(dev string) (bool, error) { - specs, err := collectCDISpecs() - if err != nil { - return false, err - } - - d, err := GetCDIForDevice(dev, specs) - if err != nil { - return false, err - } - - return d != nil, nil -} - -// UpdateOCISpecForDevices updates the given OCI spec based on the requested CDI devices -func UpdateOCISpecForDevices(ociconfig *spec.Spec, devs []string) error { - specs, err := collectCDISpecs() - if err != nil { - return err - } - - return UpdateOCISpecForDevicesWithSpec(ociconfig, devs, specs) -} - -// UpdateOCISpecForDevicesWithSpec updates the given OCI spec based on the requested CDI devices using the given CDI specs. -func UpdateOCISpecForDevicesWithSpec(ociconfig *spec.Spec, devs []string, specs map[string]*cdispec.Spec) error { - edits := make(map[string]*cdispec.Spec) - - for _, d := range devs { - spec, err := GetCDIForDevice(d, specs) - if err != nil { - return err - } - - edits[spec.Kind] = spec - _, device := extractVendor(d) - err = cdispec.ApplyOCIEditsForDevice(ociconfig, spec, device) - if err != nil { - return err - } - } - - for _, spec := range edits { - if err := cdispec.ApplyOCIEdits(ociconfig, spec); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go index b7f86da8695..d3d9839d639 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go @@ -14,5 +14,5 @@ limitations under the License. */ -// Package events defines the event pushing and subscription service. +// Package events defines the ttrpc event service. package events diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/api/types/task/doc.go similarity index 91% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s rename to vendor/github.com/containerd/containerd/api/types/task/doc.go index c073fa4ad7c..e10c7a46993 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s +++ b/vendor/github.com/containerd/containerd/api/types/task/doc.go @@ -13,3 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ + +// Package task defines the task service. 
+package task diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 8b600673fe3..5606cc88a94 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,15 +21,14 @@ package cio import ( "context" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" "syscall" "github.com/containerd/fifo" - "github.com/pkg/errors" ) // NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root @@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { return nil, err } } - dir, err := ioutil.TempDir(root, "") + dir, err := os.MkdirTemp(root, "") if err != nil { return nil, err } @@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { if fifos.Stdin != "" { if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdin fifo") + return f, fmt.Errorf("failed to open stdin fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdin != nil { @@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if fifos.Stdout != "" { if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdout fifo") + return f, fmt.Errorf("failed to open stdout fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdout != nil { @@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if !fifos.Terminal && fifos.Stderr != "" { if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stderr fifo") + return f, fmt.Errorf("failed to open stderr fifo: %w", retErr) } } return f, nil diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go index ded475788f6..f3d736a6d42 100644 --- a/vendor/github.com/containerd/containerd/cio/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -23,7 +23,6 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) const pipeRoot = `\\.\pipe` @@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdin != "" { l, err := winio.ListenPipe(fifos.Stdin, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin) + return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err) } cios.closers = append(cios.closers, l) @@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdout != "" { l, err := winio.ListenPipe(fifos.Stdout, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout) + return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err) } cios.closers = append(cios.closers, l) @@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stderr != "" { l, err := winio.ListenPipe(fifos.Stderr, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to 
create stderr pipe %s", fifos.Stderr) + return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err) } cios.closers = append(cios.closers, l) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go new file mode 100644 index 00000000000..1391884cde7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/var/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/var/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/var/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/var/run/containerd/fifo" + // DefaultRuntime would be a multiple of choices, thus empty + DefaultRuntime = "" + // DefaultConfigDir is the default location for config files. + DefaultConfigDir = "/etc/containerd" +) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 6b69cd06b9b..8e2619a381a 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index a80700075f9..9f4bed8b077 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 05a35228ca4..87622559708 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -17,7 +17,7 @@ // Package errdefs defines the common errors used throughout containerd // packages. // -// Use with errors.Wrap and error.Wrapf to add context to an error. +// Use with fmt.Errorf to add context to an error. // // To detect an error class, use the IsXXX functions to tell whether an error // is of a certain type. 
@@ -28,8 +28,7 @@ package errdefs import ( "context" - - "github.com/pkg/errors" + "errors" ) // Definitions of common error types used throughout containerd. All containerd diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 209f63bd0fc..7a9b33e05af 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -18,9 +18,9 @@ package errdefs import ( "context" + "fmt" "strings" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -68,9 +68,9 @@ func ToGRPC(err error) error { // ToGRPCf maps the error to grpc error codes, assembling the formatting string // and combining it with the target error string. // -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) } // FromGRPC returns the underlying error from a grpc service based on the grpc error code @@ -104,9 +104,9 @@ func FromGRPC(err error) error { msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrap(cls, msg) + err = fmt.Errorf("%s: %w", msg, cls) } else { - err = errors.WithStack(cls) + err = cls } return err diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go new file mode 100644 index 00000000000..a1f385d7abd --- /dev/null +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -0,0 +1,251 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package exchange + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/typeurl" + goevents "github.com/docker/go-events" + "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" +) + +// Exchange broadcasts events +type Exchange struct { + broadcaster *goevents.Broadcaster +} + +// NewExchange returns a new event Exchange +func NewExchange() *Exchange { + return &Exchange{ + broadcaster: goevents.NewBroadcaster(), + } +} + +var _ events.Publisher = &Exchange{} +var _ events.Forwarder = &Exchange{} +var _ events.Subscriber = &Exchange{} + +// Forward accepts an envelope to be directly distributed on the exchange. +// +// This is useful when an event is forwarded on behalf of another namespace or +// when the event is propagated on behalf of another publisher. 
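The Exchange below fans envelopes out to any number of subscribers; Publish stamps the namespace, topic, and timestamp, while Subscribe returns an envelope channel plus an error channel that closes when the subscription ends. A rough usage sketch, assuming a typeurl-registered event type (demoEvent is hypothetical):

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/events/exchange"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/typeurl"
)

// demoEvent is an illustrative payload; registering it with typeurl lets
// Publish marshal it into the envelope's Any field.
type demoEvent struct {
	Msg string
}

func main() {
	typeurl.Register(&demoEvent{}, "demo.event.v1")

	// Publish requires a namespace on the context.
	ctx, cancel := context.WithCancel(
		namespaces.WithNamespace(context.Background(), "default"))
	defer cancel()

	ex := exchange.NewExchange()
	ch, errs := ex.Subscribe(ctx) // no filters: receive every envelope

	if err := ex.Publish(ctx, "/demo/created", &demoEvent{Msg: "hi"}); err != nil {
		panic(err)
	}

	select {
	case env := <-ch:
		fmt.Println(env.Topic, env.Event.TypeUrl) // /demo/created demo.event.v1
	case err := <-errs:
		panic(err)
	}
}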
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) { + if err := validateEnvelope(envelope); err != nil { + return err + } + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error forwarding event") + } else { + logger.Debug("event forwarded") + } + }() + + return e.broadcaster.Write(envelope) +} + +// Publish packages and sends an event. The caller will be considered the +// initial publisher of the event. This means the timestamp will be calculated +// at this point and this method may read from the calling context. +func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { + var ( + namespace string + encoded *types.Any + envelope events.Envelope + ) + + namespace, err = namespaces.NamespaceRequired(ctx) + if err != nil { + return fmt.Errorf("failed publishing event: %w", err) + } + if err := validateTopic(topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", topic, err) + } + + encoded, err = typeurl.MarshalAny(event) + if err != nil { + return err + } + + envelope.Timestamp = time.Now().UTC() + envelope.Namespace = namespace + envelope.Topic = topic + envelope.Event = encoded + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error publishing event") + } else { + logger.Debug("event published") + } + }() + + return e.broadcaster.Write(&envelope) +} + +// Subscribe to events on the exchange. Events are sent through the returned +// channel ch. If an error is encountered, it will be sent on channel errs and +// errs will be closed. To end the subscription, cancel the provided context. +// +// Zero or more filters may be provided as strings. Only events that match +// *any* of the provided filters will be sent on the channel. The filters use +// the standard containerd filters package syntax. +func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) { + var ( + evch = make(chan *events.Envelope) + errq = make(chan error, 1) + channel = goevents.NewChannel(0) + queue = goevents.NewQueue(channel) + dst goevents.Sink = queue + ) + + closeAll := func() { + channel.Close() + queue.Close() + e.broadcaster.Remove(dst) + close(errq) + } + + ch = evch + errs = errq + + if len(fs) > 0 { + filter, err := filters.ParseAll(fs...) + if err != nil { + errq <- fmt.Errorf("failed parsing subscription filters: %w", err) + closeAll() + return + } + + dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool { + return filter.Match(adapt(gev)) + })) + } + + e.broadcaster.Add(dst) + + go func() { + defer closeAll() + + var err error + loop: + for { + select { + case ev := <-channel.C: + env, ok := ev.(*events.Envelope) + if !ok { + // TODO(stevvooe): For the most part, we are well protected + // from this condition. Both Forward and Publish protect + // from this. 
+ err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev) + break + } + + select { + case evch <- env: + case <-ctx.Done(): + break loop + } + case <-ctx.Done(): + break loop + } + } + + if err == nil { + if cerr := ctx.Err(); cerr != context.Canceled { + err = cerr + } + } + + errq <- err + }() + + return +} + +func validateTopic(topic string) error { + if topic == "" { + return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument) + } + + if topic[0] != '/' { + return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument) + } + + if len(topic) == 1 { + return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument) + } + + components := strings.Split(topic[1:], "/") + for _, component := range components { + if err := identifiers.Validate(component); err != nil { + return fmt.Errorf("failed validation on component %q: %w", component, err) + } + } + + return nil +} + +func validateEnvelope(envelope *events.Envelope) error { + if err := identifiers.Validate(envelope.Namespace); err != nil { + return fmt.Errorf("event envelope has invalid namespace: %w", err) + } + + if err := validateTopic(envelope.Topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err) + } + + if envelope.Timestamp.IsZero() { + return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument) + } + + return nil +} + +func adapt(ev interface{}) filters.Adaptor { + if adaptor, ok := ev.(filters.Adaptor); ok { + return adaptor + } + + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + return "", false + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 00000000000..5a9c559c1e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 00000000000..cf09d8d9e4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package filters defines a syntax and parser that can be used for the +// filtration of items across the containerd API. The core is built on the +// concept of protobuf field paths, with quoting. Several operators allow the +// user to flexibly select items based on field presence, equality, inequality +// and regular expressions. Flexible adaptors support working with any type. +// +// The syntax is fairly familiar, if you've used container ecosystem +// projects. At the core, we base it on the concept of protobuf field +// paths, augmenting with the ability to quote portions of the field path +// to match arbitrary labels. These "selectors" come in the following +// syntax: +// +// ``` +// [] +// ``` +// +// A basic example is as follows: +// +// ``` +// name==foo +// ``` +// +// This would match all objects that have a field `name` with the value +// `foo`. If we only want to test if the field is present, we can omit the +// operator. This is most useful for matching labels in containerd. The +// following will match objects that have the field "labels" and have the +// label "foo" defined: +// +// ``` +// labels.foo +// ``` +// +// We also allow for quoting of parts of the field path to allow matching +// of arbitrary items: +// +// ``` +// labels."very complex label"==something +// ``` +// +// We also define `!=` and `~=` as operators. The `!=` will match all +// objects that don't match the value for a field and `~=` will compile the +// target value as a regular expression and match the field value against that. +// +// Selectors can be combined using a comma, such that the resulting +// selector will require all selectors are matched for the object to match. 
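A runnable sketch of these selectors against the Adaptor interface from adaptor.go above, including a regex match quoted with the package's extended |...| delimiters (the object's field values are illustrative):

package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// Comma-separated selectors must all match: equality on name, presence
	// of the label "bar", and a regular expression on name.
	f, err := filters.Parse(`name==foo,labels.bar,name~=|^fo.*|`)
	if err != nil {
		panic(err)
	}

	obj := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		switch fieldpath[0] {
		case "name":
			return "foo", true
		case "labels":
			if len(fieldpath) > 1 && fieldpath[1] == "bar" {
				return "baz", true // label "bar" is present with value "baz"
			}
		}
		return "", false
	})

	fmt.Println(f.Match(obj)) // true
}

ParseAll, defined further below, composes whole filter strings with OR semantics, whereas the commas inside a single string AND its selectors together.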
+// The following example will match objects that are named `foo` and have +// the label `bar`: +// +// ``` +// name==foo,labels.bar +// ``` +// +package filters + +import ( + "regexp" + + "github.com/containerd/containerd/log" +) + +// Filter matches specific resources based the provided filter +type Filter interface { + Match(adaptor Adaptor) bool +} + +// FilterFunc is a function that handles matching with an adaptor +type FilterFunc func(Adaptor) bool + +// Match matches the FilterFunc returning true if the object matches the filter +func (fn FilterFunc) Match(adaptor Adaptor) bool { + return fn(adaptor) +} + +// Always is a filter that always returns true for any type of object +var Always FilterFunc = func(adaptor Adaptor) bool { + return true +} + +// Any allows multiple filters to be matched against the object +type Any []Filter + +// Match returns true if any of the provided filters are true +func (m Any) Match(adaptor Adaptor) bool { + for _, m := range m { + if m.Match(adaptor) { + return true + } + } + + return false +} + +// All allows multiple filters to be matched against the object +type All []Filter + +// Match only returns true if all filters match the object +func (m All) Match(adaptor Adaptor) bool { + for _, m := range m { + if !m.Match(adaptor) { + return false + } + } + + return true +} + +type operator int + +const ( + operatorPresent = iota + operatorEqual + operatorNotEqual + operatorMatches +) + +func (op operator) String() string { + switch op { + case operatorPresent: + return "?" + case operatorEqual: + return "==" + case operatorNotEqual: + return "!=" + case operatorMatches: + return "~=" + } + + return "unknown" +} + +type selector struct { + fieldpath []string + operator operator + value string + re *regexp.Regexp +} + +func (m selector) Match(adaptor Adaptor) bool { + value, present := adaptor.Field(m.fieldpath) + + switch m.operator { + case operatorPresent: + return present + case operatorEqual: + return present && value == m.value + case operatorNotEqual: + return value != m.value + case operatorMatches: + if m.re == nil { + r, err := regexp.Compile(m.value) + if err != nil { + log.L.Errorf("error compiling regexp %q", m.value) + return false + } + + m.re = r + } + + return m.re.MatchString(value) + default: + return false + } +} diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go new file mode 100644 index 00000000000..49182d7b7bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -0,0 +1,291 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "fmt" + "io" + + "github.com/containerd/containerd/errdefs" +) + +/* +Parse the strings into a filter that may be used with an adaptor. + +The filter is made up of zero or more selectors. + +The format is a comma separated list of expressions, in the form of +``, known as selectors. 
All selectors must match the +target object for the filter to be true. + +We define the operators "==" for equality, "!=" for not equal and "~=" for a +regular expression. If the operator and value are not present, the matcher will +test for the presence of a value, as defined by the target object. + +The formal grammar is as follows: + +selectors := selector ("," selector)* +selector := fieldpath (operator value) +fieldpath := field ('.' field)* +field := quoted | [A-Za-z] [A-Za-z0-9_]+ +operator := "==" | "!=" | "~=" +value := quoted | [^\s,]+ +quoted := + +*/ +func Parse(s string) (Filter, error) { + // special case empty to match all + if s == "" { + return Always, nil + } + + p := parser{input: s} + return p.parse() +} + +// ParseAll parses each filter in ss and returns a filter that will return true +// if any filter matches the expression. +// +// If no filters are provided, the filter will match anything. +func ParseAll(ss ...string) (Filter, error) { + if len(ss) == 0 { + return Always, nil + } + + var fs []Filter + for _, s := range ss { + f, err := Parse(s) + if err != nil { + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) + } + + fs = append(fs, f) + } + + return Any(fs), nil +} + +type parser struct { + input string + scanner scanner +} + +func (p *parser) parse() (Filter, error) { + p.scanner.init(p.input) + + ss, err := p.selectors() + if err != nil { + return nil, fmt.Errorf("filters: %w", err) + } + + return ss, nil +} + +func (p *parser) selectors() (Filter, error) { + s, err := p.selector() + if err != nil { + return nil, err + } + + ss := All{s} + +loop: + for { + tok := p.scanner.peek() + switch tok { + case ',': + pos, tok, _ := p.scanner.scan() + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a separator") + } + + s, err := p.selector() + if err != nil { + return nil, err + } + + ss = append(ss, s) + case tokenEOF: + break loop + default: + return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) + } + } + + return ss, nil +} + +func (p *parser) selector() (selector, error) { + fieldpath, err := p.fieldpath() + if err != nil { + return selector{}, err + } + + switch p.scanner.peek() { + case ',', tokenSeparator, tokenEOF: + return selector{ + fieldpath: fieldpath, + operator: operatorPresent, + }, nil + } + + op, err := p.operator() + if err != nil { + return selector{}, err + } + + var allowAltQuotes bool + if op == operatorMatches { + allowAltQuotes = true + } + + value, err := p.value(allowAltQuotes) + if err != nil { + if err == io.EOF { + return selector{}, io.ErrUnexpectedEOF + } + return selector{}, err + } + + return selector{ + fieldpath: fieldpath, + value: value, + operator: op, + }, nil +} + +func (p *parser) fieldpath() ([]string, error) { + f, err := p.field() + if err != nil { + return nil, err + } + + fs := []string{f} +loop: + for { + tok := p.scanner.peek() // lookahead to consume field separator + + switch tok { + case '.': + pos, tok, _ := p.scanner.scan() // consume separator + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a field separator (`.`)") + } + + f, err := p.field() + if err != nil { + return nil, err + } + + fs = append(fs, f) + default: + // let the layer above handle the other bad cases. 
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 00000000000..b76aab9b4a7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. 
+// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 00000000000..6a485467b8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go index f52317b491b..cbd3a52ba9e 100644 --- a/vendor/github.com/containerd/containerd/identifiers/validate.go +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -25,10 +25,10 @@ package identifiers import ( + "fmt" "regexp" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) const ( @@ -51,15 +51,15 @@ var ( // In general identifiers that pass this validation should be safe for use as filesystem path components. func Validate(s string) error { if len(s) == 0 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty") + return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument) } if len(s) > maxLength { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength) + return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument) } if !identifierRe.MatchString(s) { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe) + return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument) } return nil } diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 37b6a7d1c8a..0db9562b82b 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -52,7 +52,8 @@ const ( // WithLogger returns a new context with the provided logger. Use in // combination with logger.WithField(s) for great effect. func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) + e := logger.WithContext(ctx) + return context.WithValue(ctx, loggerKey{}, e) } // GetLogger retrieves the current logger from the context. 
If no logger is @@ -61,7 +62,7 @@ func GetLogger(ctx context.Context) *logrus.Entry { logger := ctx.Value(loggerKey{}) if logger == nil { - return L + return L.WithContext(ctx) } return logger.(*logrus.Entry) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index b53c9012c1b..e5e23fe4301 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -18,11 +18,11 @@ package namespaces import ( "context" + "fmt" "os" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/identifiers" - "github.com/pkg/errors" ) const ( @@ -69,10 +69,10 @@ func Namespace(ctx context.Context) (string, bool) { func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + return "", fmt.Errorf("namespace is required: %w", errdefs.ErrFailedPrecondition) } if err := identifiers.Validate(namespace); err != nil { - return "", errors.Wrap(err, "namespace validation") + return "", fmt.Errorf("namespace validation: %w", err) } return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go index b382215025d..176a0e6673a 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go @@ -16,11 +16,11 @@ package constants -// TODO(random-liu): Merge annotations package into this package. - const ( // K8sContainerdNamespace is the namespace we use to connect containerd. K8sContainerdNamespace = "k8s.io" - // CRIVersion is the CRI version supported by the CRI plugin. - CRIVersion = "v1alpha2" + // CRIVersion is the latest CRI version supported by the CRI plugin. + CRIVersion = "v1" + // CRIVersionAlpha is the alpha version of CRI supported by the CRI plugin. + CRIVersionAlpha = "v1alpha2" ) diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go index 59d41411f23..2cdc97e1de5 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go @@ -25,7 +25,7 @@ import ( "github.com/containerd/containerd/cio" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // AttachOptions specifies how to attach to a container. diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go index 2780b958ab5..f0f90840160 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go index dcc9fe6eb94..b6c9c12028f 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
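The build-constraint churn in these vendored files follows the Go 1.17 migration: non-Windows files gain a //go:build expression above the legacy // +build comment, while files whose _windows.go suffix already constrains GOOS drop their tags entirely. A minimal sketch of the paired syntax (the file below is illustrative, not part of this diff):

//go:build !windows && !darwin
// +build !windows,!darwin

// The expression form above (&&, ||, !) is the Go 1.17+ syntax; the legacy
// comma/space form below it targets older toolchains. gofmt keeps the pair
// in sync when both are present.
package example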
@@ -19,13 +17,13 @@ package io import ( + "fmt" "io" "net" "os" "sync" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -62,7 +60,7 @@ func openPipe(ctx context.Context, fn string, flag int, perm os.FileMode) (io.Re func (p *pipe) Write(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Write(b) } @@ -70,7 +68,7 @@ func (p *pipe) Write(b []byte) (int, error) { func (p *pipe) Read(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Read(b) } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go index 27721e71724..674a89fdff8 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go @@ -21,11 +21,10 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "time" "github.com/sirupsen/logrus" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cioutil "github.com/containerd/containerd/pkg/ioutil" ) @@ -43,7 +42,7 @@ const ( // NewDiscardLogger creates logger which discards all the input. func NewDiscardLogger() io.WriteCloser { - return cioutil.NewNopWriteCloser(ioutil.Discard) + return cioutil.NewNopWriteCloser(io.Discard) } // NewCRILogger returns a write closer which redirect container log into diff --git a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go index d0e0bf37efa..cf027eaf8c0 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go @@ -18,8 +18,8 @@ package util import ( "encoding/json" - - "github.com/pkg/errors" + "errors" + "fmt" ) // DeepCopy makes a deep copy from src into dst. 
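The error-handling hunks above and below all make the same mechanical swap: github.com/pkg/errors wrapping becomes stdlib fmt.Errorf with the %w verb, so the errdefs sentinels stay matchable through the unwrap chain. A minimal sketch of why the change is behavior-preserving (the message text is borrowed from the identifiers hunk above):

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
)

func main() {
	// Before: errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
	// After: fmt.Errorf keeps the sentinel reachable via errors.Unwrap.
	err := fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument)

	fmt.Println(errors.Is(err, errdefs.ErrInvalidArgument)) // true
	fmt.Println(errdefs.IsInvalidArgument(err))             // also true
}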
@@ -32,11 +32,11 @@ func DeepCopy(dst interface{}, src interface{}) error {
 	}
 	bytes, err := json.Marshal(src)
 	if err != nil {
-		return errors.Wrap(err, "unable to marshal src")
+		return fmt.Errorf("unable to marshal src: %w", err)
 	}
 	err = json.Unmarshal(bytes, dst)
 	if err != nil {
-		return errors.Wrap(err, "unable to unmarshal into dst")
+		return fmt.Errorf("unable to unmarshal into dst: %w", err)
 	}
 	return nil
 }
diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
index aa604baab92..74c303b944f 100644
--- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
+++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go
@@ -18,10 +18,9 @@ package dialer
 
 import (
 	"context"
+	"fmt"
 	"net"
 	"time"
-
-	"github.com/pkg/errors"
 )
 
 type dialResult struct {
@@ -74,6 +73,6 @@ func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) {
 				dr.c.Close()
 			}
 		}()
-		return nil, errors.Errorf("dial %s: timeout", address)
+		return nil, fmt.Errorf("dial %s: timeout", address)
 	}
 }
diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go
index e7d19583395..b4304ffbf1f 100644
--- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go
+++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*
diff --git a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go
new file mode 100644
index 00000000000..bc1af75abb7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go
@@ -0,0 +1,109 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package shutdown
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// ErrShutdown is the error condition when a context has been fully shut down
+var ErrShutdown = errors.New("shutdown")
+
+// Service is used to facilitate shutdown through callback
+// registration and shutdown initiation
+type Service interface {
+	// Shutdown initiates shutdown
+	Shutdown()
+	// RegisterCallback registers functions to be called on shutdown and before
+	// the shutdown channel is closed. A callback error will propagate to the
+	// context error
+	RegisterCallback(func(context.Context) error)
+}
+
+// WithShutdown returns a context which is similar to a cancel context, but
+// with callbacks which can propagate to the context error. Unlike a cancel
+// context, the shutdown context cannot be canceled from the parent context.
+// However, future child contexts will be canceled upon shutdown.
+func WithShutdown(ctx context.Context) (context.Context, Service) { + ss := &shutdownService{ + Context: ctx, + doneC: make(chan struct{}), + timeout: 30 * time.Second, + } + return ss, ss +} + +type shutdownService struct { + context.Context + + mu sync.Mutex + isShutdown bool + callbacks []func(context.Context) error + doneC chan struct{} + err error + timeout time.Duration +} + +func (s *shutdownService) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + if s.isShutdown { + return + } + s.isShutdown = true + + go func(callbacks []func(context.Context) error) { + ctx, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + grp, ctx := errgroup.WithContext(ctx) + for i := range callbacks { + fn := callbacks[i] + grp.Go(func() error { return fn(ctx) }) + } + err := grp.Wait() + if err == nil { + err = ErrShutdown + } + s.mu.Lock() + s.err = err + close(s.doneC) + s.mu.Unlock() + }(s.callbacks) +} + +func (s *shutdownService) Done() <-chan struct{} { + return s.doneC +} + +func (s *shutdownService) Err() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.err +} +func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.callbacks == nil { + s.callbacks = []func(context.Context) error{} + } + s.callbacks = append(s.callbacks, fn) +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go index 8b4d925d28e..f05ab7aa9f9 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -17,13 +17,14 @@ package ttrpcutil import ( + "errors" + "fmt" "sync" "time" v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) const ttrpcDialTimeout = 5 * time.Second @@ -43,7 +44,7 @@ func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { conn, err := dialer.Dialer(address, ttrpcDialTimeout) if err != nil { - return nil, errors.Wrap(err, "failed to connect") + return nil, fmt.Errorf("failed to connect: %w", err) } client := ttrpc.NewClient(conn, opts...) 
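A minimal usage sketch of the new pkg/shutdown package vendored above: registered callbacks run concurrently once Shutdown is called, and the context resolves to ErrShutdown (or the first callback error) when they finish.

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/pkg/shutdown"
)

func main() {
	ctx, sd := shutdown.WithShutdown(context.Background())

	// Runs during Shutdown, before the done channel is closed.
	sd.RegisterCallback(func(context.Context) error {
		fmt.Println("flushing state")
		return nil
	})

	sd.Shutdown()
	<-ctx.Done()           // unblocks once all callbacks have returned
	fmt.Println(ctx.Err()) // shutdown.ErrShutdown
}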
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go index aab756fd2a6..4f8d7dd2d59 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go index c7657e1869b..3913ef66373 100644 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -38,12 +38,22 @@ func platformVector(platform specs.Platform) []specs.Platform { switch platform.Architecture { case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } vector = append(vector, specs.Platform{ Architecture: "386", OS: platform.OS, OSVersion: platform.OSVersion, OSFeatures: platform.OSFeatures, - Variant: platform.Variant, }) case "arm": if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 4a7177e3138..046e0356d19 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -18,6 +18,7 @@ package platforms import ( "bufio" + "fmt" "os" "runtime" "strings" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) // Present the ARM instruction set architecture, eg: v7, v8 @@ -48,7 +48,7 @@ func cpuVariant() string { // by ourselves. We can just parse these information from /proc/cpuinfo func getCPUInfo(pattern string) (info string, err error) { if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) } cpuinfo, err := os.Open("/proc/cpuinfo") @@ -75,7 +75,7 @@ func getCPUInfo(pattern string) (info string, err error) { return "", err } - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) + return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) } func getCPUVariant() string { diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 6ede94061eb..dbe9957ca9d 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. 
func isKnownOS(os string) bool { switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false @@ -86,9 +86,11 @@ func normalizeArch(arch, variant string) (string, string) { case "i386": arch = "386" variant = "" - case "x86_64", "x86-64": + case "x86_64", "x86-64", "amd64": arch = "amd64" - variant = "" + if variant == "v1" { + variant = "" + } case "aarch64", "arm64": arch = "arm64" switch variant { diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go index cb77fbc9f7b..cfa3ff34a19 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -16,27 +16,11 @@ package platforms -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - // DefaultString returns the default string specifier for the platform. func DefaultString() string { return Format(DefaultSpec()) } -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - // DefaultStrict returns strict form of Default. func DefaultStrict() MatchComparer { return OnlyStrict(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go new file mode 100644 index 00000000000..e249fe48d38 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -0,0 +1,45 @@ +//go:build darwin +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. 
+func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go index e8a7d5ffa0d..49690f1b3e7 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. @@ -18,6 +19,22 @@ package platforms +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + // Default returns the default matcher for the platform. func Default() MatchComparer { return Only(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index 0c380e3b7c1..c1aaf72ca8e 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -29,6 +27,18 @@ import ( "golang.org/x/sys/windows" ) +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. 
+ Variant: cpuVariant(), + } +} + type matchComparer struct { defaults Matcher osVersionPrefix string diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 088bdea0508..8f955d036df 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -107,6 +107,8 @@ package platforms import ( + "fmt" + "path" "regexp" "runtime" "strconv" @@ -114,7 +116,6 @@ import ( "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( @@ -166,14 +167,14 @@ func (m *matcher) String() string { func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) } parts := strings.Split(specifier, "/") for _, part := range parts { if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) } } @@ -205,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) case 2: // In this case, we treat as a regular os/arch pair. We don't care // about whether or not we know of the platform. @@ -227,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) } // MustParse is like Parses but panics if the specifier cannot be parsed. @@ -246,20 +247,7 @@ func Format(platform specs.Platform) string { return "unknown" } - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") + return path.Join(platform.OS, platform.Architecture, platform.Variant) } // Normalize validates and translate the platform to the canonical value. 
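Taken together, the platforms changes are easiest to see end to end: normalizeArch now folds the redundant amd64 "v1" variant, Format joins the specifier with path.Join, and platformVector gives amd64 the same graceful variant fallback that arm already had. A sketch, assuming the public Parse/Format/Only helpers drive the internals shown above, as they do upstream:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// "x86_64" normalizes to amd64, and the default "v1" variant is dropped.
	p, err := platforms.Parse("linux/x86_64/v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // linux/amd64

	// An amd64/v3 matcher now also accepts the lower v2/v1 variants,
	// mirroring the existing arm behavior in platformVector.
	m := platforms.Only(platforms.MustParse("linux/amd64/v3"))
	fmt.Println(m.Match(platforms.MustParse("linux/amd64/v2"))) // true
}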
@@ -269,10 +257,5 @@ func joinNotEmpty(s ...string) string {
 func Normalize(platform specs.Platform) specs.Platform {
 	platform.OS = normalizeOS(platform.OS)
 	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
-
-	// these fields are deprecated, remove them
-	platform.OSFeatures = nil
-	platform.OSVersion = ""
-
 	return platform
 }
diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go
new file mode 100644
index 00000000000..dcb533c8a74
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/context.go
@@ -0,0 +1,171 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events/exchange"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// InitContext is used for plugin initialization
+type InitContext struct {
+	Context      context.Context
+	Root         string
+	State        string
+	Config       interface{}
+	Address      string
+	TTRPCAddress string
+
+	// deprecated: will be removed in 2.0, use plugin.EventType
+	Events *exchange.Exchange
+
+	Meta *Meta // plugins can fill in metadata at init.
+
+	plugins *Set
+}
+
+// NewContext returns a new plugin InitContext
+func NewContext(ctx context.Context, r *Registration, plugins *Set, root, state string) *InitContext {
+	return &InitContext{
+		Context: ctx,
+		Root:    filepath.Join(root, r.URI()),
+		State:   filepath.Join(state, r.URI()),
+		Meta: &Meta{
+			Exports: map[string]string{},
+		},
+		plugins: plugins,
+	}
+}
+
+// Get returns the first plugin by its type
+func (i *InitContext) Get(t Type) (interface{}, error) {
+	return i.plugins.Get(t)
+}
+
+// Meta contains information gathered from the registration and initialization
+// process.
+type Meta struct {
+	Platforms    []ocispec.Platform // platforms supported by plugin
+	Exports      map[string]string  // values exported by plugin
+	Capabilities []string           // feature switches for plugin
+}
+
+// Plugin represents an initialized plugin, used with an init context.
+type Plugin struct {
+	Registration *Registration // registration, as initialized
+	Config       interface{}   // config, as initialized
+	Meta         *Meta
+
+	instance interface{}
+	err      error // will be set if there was an error initializing the plugin
+}
+
+// Err returns the error encountered during initialization.
+// Returns nil if no error was encountered.
+func (p *Plugin) Err() error {
+	return p.err
+}
+
+// Instance returns the instance and any initialization error of the plugin
+func (p *Plugin) Instance() (interface{}, error) {
+	return p.instance, p.err
+}
+
+// Set defines a plugin collection, used with InitContext.
+//
+// This maintains ordering and unique indexing over the set.
+//
+// After iteratively instantiating plugins, this set should represent the
+// ordered initialization set of plugins for a containerd instance.
+type Set struct { + ordered []*Plugin // order of initialization + byTypeAndID map[Type]map[string]*Plugin +} + +// NewPluginSet returns an initialized plugin set +func NewPluginSet() *Set { + return &Set{ + byTypeAndID: make(map[Type]map[string]*Plugin), + } +} + +// Add a plugin to the set +func (ps *Set) Add(p *Plugin) error { + if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok { + ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{ + p.Registration.ID: p, + } + } else if _, idok := byID[p.Registration.ID]; !idok { + byID[p.Registration.ID] = p + } else { + return fmt.Errorf("plugin %v already initialized: %w", p.Registration.URI(), errdefs.ErrAlreadyExists) + } + + ps.ordered = append(ps.ordered, p) + return nil +} + +// Get returns the first plugin by its type +func (ps *Set) Get(t Type) (interface{}, error) { + for _, v := range ps.byTypeAndID[t] { + return v.Instance() + } + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) +} + +// GetAll returns all initialized plugins +func (ps *Set) GetAll() []*Plugin { + return ps.ordered +} + +// Plugins returns plugin set +func (i *InitContext) Plugins() *Set { + return i.plugins +} + +// GetAll plugins in the set +func (i *InitContext) GetAll() []*Plugin { + return i.plugins.GetAll() +} + +// GetByID returns the plugin of the given type and ID +func (i *InitContext) GetByID(t Type, id string) (interface{}, error) { + ps, err := i.GetByType(t) + if err != nil { + return nil, err + } + p, ok := ps[id] + if !ok { + return nil, fmt.Errorf("no %s plugins with id %s: %w", t, id, errdefs.ErrNotFound) + } + return p.Instance() +} + +// GetByType returns all plugins with the specific type. +func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) { + p, ok := i.plugins.byTypeAndID[t] + if !ok { + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) + } + + return p, nil +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go new file mode 100644 index 00000000000..eb38c127157 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -0,0 +1,223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "errors" + "fmt" + "sync" +) + +var ( + // ErrNoType is returned when no type is specified + ErrNoType = errors.New("plugin: no type") + // ErrNoPluginID is returned when no id is specified + ErrNoPluginID = errors.New("plugin: no id") + // ErrIDRegistered is returned when a duplicate id is already registered + ErrIDRegistered = errors.New("plugin: id already registered") + // ErrSkipPlugin is used when a plugin is not initialized and should not be loaded, + // this allows the plugin loader differentiate between a plugin which is configured + // not to load and one that fails to load. 
+ ErrSkipPlugin = errors.New("skip plugin") + + // ErrInvalidRequires will be thrown if the requirements for a plugin are + // defined in an invalid manner. + ErrInvalidRequires = errors.New("invalid requires") +) + +// IsSkipPlugin returns true if the error is skipping the plugin +func IsSkipPlugin(err error) bool { + return errors.Is(err, ErrSkipPlugin) +} + +// Type is the type of the plugin +type Type string + +func (t Type) String() string { return string(t) } + +const ( + // InternalPlugin implements an internal plugin to containerd + InternalPlugin Type = "io.containerd.internal.v1" + // RuntimePlugin implements a runtime + RuntimePlugin Type = "io.containerd.runtime.v1" + // RuntimePluginV2 implements a runtime v2 + RuntimePluginV2 Type = "io.containerd.runtime.v2" + // ServicePlugin implements a internal service + ServicePlugin Type = "io.containerd.service.v1" + // GRPCPlugin implements a grpc service + GRPCPlugin Type = "io.containerd.grpc.v1" + // TTRPCPlugin implements a ttrpc shim service + TTRPCPlugin Type = "io.containerd.ttrpc.v1" + // SnapshotPlugin implements a snapshotter + SnapshotPlugin Type = "io.containerd.snapshotter.v1" + // TaskMonitorPlugin implements a task monitor + TaskMonitorPlugin Type = "io.containerd.monitor.v1" + // DiffPlugin implements a differ + DiffPlugin Type = "io.containerd.differ.v1" + // MetadataPlugin implements a metadata store + MetadataPlugin Type = "io.containerd.metadata.v1" + // ContentPlugin implements a content store + ContentPlugin Type = "io.containerd.content.v1" + // GCPlugin implements garbage collection policy + GCPlugin Type = "io.containerd.gc.v1" + // EventPlugin implements event handling + EventPlugin Type = "io.containerd.event.v1" + // TracingProcessorPlugin implements a open telemetry span processor + TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1" +) + +const ( + // RuntimeLinuxV1 is the legacy linux runtime + RuntimeLinuxV1 = "io.containerd.runtime.v1.linux" + // RuntimeRuncV1 is the runc runtime that supports a single container + RuntimeRuncV1 = "io.containerd.runc.v1" + // RuntimeRuncV2 is the runc runtime that supports multiple containers per shim + RuntimeRuncV2 = "io.containerd.runc.v2" +) + +// Registration contains information for registering a plugin +type Registration struct { + // Type of the plugin + Type Type + // ID of the plugin + ID string + // Config specific to the plugin + Config interface{} + // Requires is a list of plugins that the registered plugin requires to be available + Requires []Type + + // InitFn is called when initializing a plugin. The registration and + // context are passed in. The init function may modify the registration to + // add exports, capabilities and platform support declarations. 
+ InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool +} + +// Init the registered plugin +func (r *Registration) Init(ic *InitContext) *Plugin { + p, err := r.InitFn(ic) + return &Plugin{ + Registration: r, + Config: ic.Config, + Meta: ic.Meta, + instance: p, + err: err, + } +} + +// URI returns the full plugin URI +func (r *Registration) URI() string { + return fmt.Sprintf("%s.%s", r.Type, r.ID) +} + +var register = struct { + sync.RWMutex + r []*Registration +}{} + +// Load loads all plugins at the provided path into containerd +func Load(path string) (err error) { + defer func() { + if v := recover(); v != nil { + rerr, ok := v.(error) + if !ok { + rerr = fmt.Errorf("%s", v) + } + err = rerr + } + }() + return loadPlugins(path) +} + +// Register allows plugins to register +func Register(r *Registration) { + register.Lock() + defer register.Unlock() + + if r.Type == "" { + panic(ErrNoType) + } + if r.ID == "" { + panic(ErrNoPluginID) + } + if err := checkUnique(r); err != nil { + panic(err) + } + + for _, requires := range r.Requires { + if requires == "*" && len(r.Requires) != 1 { + panic(ErrInvalidRequires) + } + } + + register.r = append(register.r, r) +} + +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return fmt.Errorf("%s: %w", r.URI(), ErrIDRegistered) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + +// Graph returns an ordered list of registered plugins for initialization. +// Plugins in disableList specified by id will be disabled. +func Graph(filter DisableFilter) (ordered []*Registration) { + register.RLock() + defer register.RUnlock() + + for _, r := range register.r { + if filter(r) { + r.Disable = true + } + } + + added := map[*Registration]bool{} + for _, r := range register.r { + if r.Disable { + continue + } + children(r, added, &ordered) + if !added[r] { + ordered = append(ordered, r) + added[r] = true + } + } + return ordered +} + +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { + for _, r := range register.r { + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) + if !added[r] { + *ordered = append(*ordered, r) + added[r] = true + } + } + } + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go new file mode 100644 index 00000000000..0df0669d29a --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -0,0 +1,63 @@ +//go:build go1.8 && !windows && amd64 && !static_build && !gccgo +// +build go1.8,!windows,amd64,!static_build,!gccgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "fmt" + "path/filepath" + "plugin" + "runtime" +) + +// loadPlugins loads all plugins for the OS and Arch +// that containerd is built for inside the provided path +func loadPlugins(path string) error { + abs, err := filepath.Abs(path) + if err != nil { + return err + } + pattern := filepath.Join(abs, fmt.Sprintf( + "*-%s-%s.%s", + runtime.GOOS, + runtime.GOARCH, + getLibExt(), + )) + libs, err := filepath.Glob(pattern) + if err != nil { + return err + } + for _, lib := range libs { + if _, err := plugin.Open(lib); err != nil { + return err + } + } + return nil +} + +// getLibExt returns a platform specific lib extension for +// the platform that containerd is running on +func getLibExt() string { + switch runtime.GOOS { + case "windows": + return "dll" + default: + return "so" + } +} diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go similarity index 67% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go rename to vendor/github.com/containerd/containerd/plugin/plugin_other.go index 6e40a9c7d7f..a2883bbbadf 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,3 +1,6 @@ +//go:build !go1.8 || windows || !amd64 || static_build || gccgo +// +build !go1.8 windows !amd64 static_build gccgo + /* Copyright The containerd Authors. @@ -14,17 +17,9 @@ limitations under the License. */ -package sys - -import ( - _ "unsafe" // required for go:linkname. -) - -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() +package plugin -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() +func loadPlugins(path string) error { + // plugins not supported until 1.8 + return nil +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 0998639b030..6fa97dfdca9 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -338,11 +338,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { // TrimNamed removes any tag or digest from the named reference. 
func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) } + return repo } func getBestReferenceType(ref reference) Reference { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index c14aacca99b..9d3a904237e 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -18,6 +18,7 @@ package shim import ( "context" + "errors" "flag" "fmt" "io" @@ -30,21 +31,15 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" shimapi "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Client for a shim server -type Client struct { - service shimapi.TaskService - context context.Context - signals chan os.Signal -} - // Publisher for events type Publisher interface { events.Publisher @@ -53,22 +48,37 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { - ID string + ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry ContainerdBinary string Address string TTRPCAddress string } +type StopStatus struct { + Pid int + ExitStatus int + ExitedAt time.Time +} + // Init func for the creation of a shim server +// TODO(2.0): Remove init function type Init func(context.Context, string, Publisher, func()) (Shim, error) // Shim server interface +// TODO(2.0): Remove unified shim interface type Shim interface { shimapi.TaskService Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) StartShim(ctx context.Context, opts StartOpts) (string, error) } +// Manager is the interface which manages the shim process +type Manager interface { + Name() string + Start(ctx context.Context, id string, opts StartOpts) (string, error) + Stop(ctx context.Context, id string) (StopStatus, error) +} + // OptsKey is the context key for the Opts value. 
type OptsKey struct{} @@ -91,10 +101,23 @@ type Config struct { NoSetupLogger bool } +type ttrpcService interface { + RegisterTTRPC(*ttrpc.Server) error +} + +type taskService struct { + shimapi.TaskService +} + +func (t taskService) RegisterTTRPC(server *ttrpc.Server) error { + shimapi.RegisterTaskService(server, t.TaskService) + return nil +} + var ( debugFlag bool versionFlag bool - idFlag string + id string namespaceFlag string socketFlag string bundlePath string @@ -111,7 +134,7 @@ func parseFlags() { flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit") flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") - flag.StringVar(&idFlag, "id", "", "id of the task") + flag.StringVar(&id, "id", "", "id of the task") flag.StringVar(&socketFlag, "socket", "", "socket path to serve") flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") @@ -136,35 +159,85 @@ func setRuntime() { } } -func setLogger(ctx context.Context, id string) error { - logrus.SetFormatter(&logrus.TextFormatter{ +func setLogger(ctx context.Context, id string) (context.Context, error) { + l := log.G(ctx) + l.Logger.SetFormatter(&logrus.TextFormatter{ TimestampFormat: log.RFC3339NanoFixed, FullTimestamp: true, }) if debugFlag { - logrus.SetLevel(logrus.DebugLevel) + l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { - return err + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + return ctx, err } - logrus.SetOutput(f) - return nil + l.Logger.SetOutput(f) + return log.WithLogger(ctx, l), nil } // Run initializes and runs a shim server -func Run(id string, initFunc Init, opts ...BinaryOpts) { +// TODO(2.0): Remove function +func Run(name string, initFunc Init, opts ...BinaryOpts) { + var config Config + for _, o := range opts { + o(&config) + } + + ctx := context.Background() + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", name)) + + if err := run(ctx, nil, initFunc, name, config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", name, err) + os.Exit(1) + } +} + +// TODO(2.0): Remove this type +type shimToManager struct { + shim Shim + name string +} + +func (stm shimToManager) Name() string { + return stm.name +} + +func (stm shimToManager) Start(ctx context.Context, id string, opts StartOpts) (string, error) { + opts.ID = id + return stm.shim.StartShim(ctx, opts) +} + +func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error) { + // shim must already have id + dr, err := stm.shim.Cleanup(ctx) + if err != nil { + return StopStatus{}, err + } + return StopStatus{ + Pid: int(dr.Pid), + ExitStatus: int(dr.ExitStatus), + ExitedAt: dr.ExitedAt, + }, nil +} + +// RunManager initialzes and runs a shim server +// TODO(2.0): Rename to Run +func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { var config Config for _, o := range opts { o(&config) } - if err := run(id, initFunc, config); err != nil { - fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", manager.Name())) + + if err := run(ctx, manager, nil, "", config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", manager.Name(), err) os.Exit(1) } } -func run(id string, initFunc Init, config Config) error { +func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { fmt.Printf("%s:\n", 
os.Args[0]) @@ -182,12 +255,12 @@ func run(id string, initFunc Init, config Config) error { setRuntime() signals, err := setupSignals(config) - if err != nil { + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { + if err := subreaper(); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } } @@ -199,27 +272,49 @@ func run(id string, initFunc Init, config Config) error { } defer publisher.Close() - ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) + ctx = namespaces.WithNamespace(ctx, namespaceFlag) ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) - ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) - ctx, cancel := context.WithCancel(ctx) - service, err := initFunc(ctx, idFlag, publisher, cancel) - if err != nil { - return err + ctx, sd := shutdown.WithShutdown(ctx) + defer sd.Shutdown() + + if manager == nil { + service, err := initFunc(ctx, id, publisher, sd.Shutdown) + if err != nil { + return err + } + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return taskService{service}, nil + }, + }) + manager = shimToManager{ + shim: service, + name: name, + } } + // Handle explicit actions switch action { case "delete": - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "namespace": namespaceFlag, }) - go handleSignals(ctx, logger, signals) - response, err := service.Cleanup(ctx) + go reap(ctx, logger, signals) + ss, err := manager.Stop(ctx, id) if err != nil { return err } - data, err := proto.Marshal(response) + data, err := proto.Marshal(&shimapi.DeleteResponse{ + Pid: uint32(ss.Pid), + ExitStatus: uint32(ss.ExitStatus), + ExitedAt: ss.ExitedAt, + }) if err != nil { return err } @@ -229,12 +324,12 @@ func run(id string, initFunc Init, config Config) error { return nil case "start": opts := StartOpts{ - ID: idFlag, ContainerdBinary: containerdBinaryFlag, Address: addressFlag, TTRPCAddress: ttrpcAddress, } - address, err := service.StartShim(ctx, opts) + + address, err := manager.Start(ctx, id, opts) if err != nil { return err } @@ -242,46 +337,120 @@ func run(id string, initFunc Init, config Config) error { return err } return nil - default: - if !config.NoSetupLogger { - if err := setLogger(ctx, idFlag); err != nil { - return err - } + } + + if !config.NoSetupLogger { + ctx, err = setLogger(ctx, id) + if err != nil { + return err } - client := NewShimClient(ctx, service, signals) - if err := client.Serve(); err != nil { - if err != context.Canceled { - return err + } + + plugin.Register(&plugin.Registration{ + Type: plugin.InternalPlugin, + ID: "shutdown", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return sd, nil + }, + }) + + // Register event plugin + plugin.Register(&plugin.Registration{ + Type: plugin.EventPlugin, + ID: "publisher", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return publisher, nil + }, + }) + + var ( + initialized = plugin.NewPluginSet() + ttrpcServices = []ttrpcService{} + ) + plugins := plugin.Graph(func(*plugin.Registration) bool { return false }) + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := 
plugin.NewContext( + ctx, + p, + initialized, + // NOTE: Root is empty since the shim does not support persistent storage, + // shim plugins should make use state directory for writing files to disk. + // The state directory will be destroyed when the shim if cleaned up or + // on reboot + "", + bundlePath, + ) + initContext.Address = addressFlag + initContext.TTRPCAddress = ttrpcAddress + + // load the plugin specific configuration if it is provided + //TODO: Read configuration passed into shim, or from state directory? + //if p.Config != nil { + // pc, err := config.Decode(p) + // if err != nil { + // return nil, err + // } + // initContext.Config = pc + //} + + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return fmt.Errorf("could not add plugin result to plugin set: %w", err) + } + + instance, err := result.Instance() + if err != nil { + if plugin.IsSkipPlugin(err) { + log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) + } else { + log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) } + continue } - // NOTE: If the shim server is down(like oom killer), the address - // socket might be leaking. - if address, err := ReadAddress("address"); err == nil { - _ = RemoveSocket(address) + if src, ok := instance.(ttrpcService); ok { + logrus.WithField("id", id).Debug("registering ttrpc service") + ttrpcServices = append(ttrpcServices, src) + } + } + + server, err := newServer() + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + return fmt.Errorf("failed creating server: %w", err) + } + + for _, srv := range ttrpcServices { + if err := srv.RegisterTTRPC(server); err != nil { + return fmt.Errorf("failed to register service: %w", err) } + } - select { - case <-publisher.Done(): - return nil - case <-time.After(5 * time.Second): - return errors.New("publisher not closed") + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != shutdown.ErrShutdown { + return err } } -} -// NewShimClient creates a new shim server client -func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client { - s := &Client{ - service: svc, - context: ctx, - signals: signals, + // NOTE: If the shim server is down(like oom killer), the address + // socket might be leaking. 
+ if address, err := ReadAddress("address"); err == nil { + _ = RemoveSocket(address) + } + + select { + case <-publisher.Done(): + return nil + case <-time.After(5 * time.Second): + return errors.New("publisher not closed") } - return s } -// Serve the shim server -func (s *Client) Serve() error { +// serve serves the ttrpc API over a unix socket in the current working directory +// and blocks until the context is canceled +func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error { dump := make(chan os.Signal, 32) setupDumpStacks(dump) @@ -289,18 +458,19 @@ func (s *Client) Serve() error { if err != nil { return err } - server, err := newServer() - if err != nil { - return errors.Wrap(err, "failed creating server") - } - logrus.Debug("registering ttrpc server") - shimapi.RegisterTaskService(server, s.service) - - if err := serve(s.context, server, socketFlag); err != nil { + l, err := serveListener(socketFlag) + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } - logger := logrus.WithFields(logrus.Fields{ + go func() { + defer l.Close() + if err := server.Serve(ctx, l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure") + } + }() + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "path": path, "namespace": namespaceFlag, @@ -310,24 +480,9 @@ func (s *Client) Serve() error { dumpStacks(logger) } }() - return handleSignals(s.context, logger, s.signals) -} -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block -func serve(ctx context.Context, server *ttrpc.Server, path string) error { - l, err := serveListener(path) - if err != nil { - return err - } - go func() { - defer l.Close() - if err := server.Serve(ctx, l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure") - } - }() - return nil + go handleExitSignals(ctx, logger, shutdown) + return reap(ctx, logger, signals) } func dumpStacks(logger *logrus.Entry) { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go index 314b45cb548..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go index 4bc636280d0..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. 
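The run()/serve() rewrite above splits the old monolithic handleSignals into two paths: reap() keeps draining SIGCHLD so children are always collected, while handleExitSignals() cancels a shutdown context on SIGINT/SIGTERM. Below is a minimal, stdlib-only sketch of that split, not the containerd code itself: reaper.Reap() is approximated by a bare Wait4 loop, and all names are illustrative.

package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"
)

// reapChildren collects every exited child without blocking; the real
// shim delegates this to reaper.Reap().
func reapChildren() {
	for {
		var ws syscall.WaitStatus
		pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, nil)
		if pid <= 0 || err != nil {
			return // no children left, or wait failed
		}
		log.Printf("reaped pid %d (status %d)", pid, ws.ExitStatus())
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Exit path: SIGINT/SIGTERM cancel the context (cf. handleExitSignals).
	exitCh := make(chan os.Signal, 32)
	signal.Notify(exitCh, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		select {
		case s := <-exitCh:
			log.Printf("caught exit signal %v, shutting down", s)
			cancel()
		case <-ctx.Done():
		}
	}()

	// Reap path: SIGCHLD stays on its own channel (cf. reap()), so
	// short-running actions like `delete` can keep reaping while the
	// exit signals are handled elsewhere.
	chldCh := make(chan os.Signal, 32)
	signal.Notify(chldCh, syscall.SIGCHLD)
	for {
		select {
		case <-ctx.Done():
			return
		case <-chldCh:
			reapChildren()
		}
	}
}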
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go index a61b6420892..e2dab0931ea 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package shim import ( "context" + "fmt" "io" "net" "os" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/sys/reaper" "github.com/containerd/fifo" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -59,7 +60,7 @@ func serveListener(path string) (net.Listener, error) { path = "[inherited from parent]" } else { if len(path) > socketPathLimit { - return nil, errors.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) + return nil, fmt.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) } l, err = net.Listen("unix", path) } @@ -70,7 +71,7 @@ func serveListener(path string) (net.Listener, error) { return l, nil } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { logger.Info("starting signal loop") for { @@ -78,6 +79,8 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si case <-ctx.Done(): return ctx.Err() case s := <-signals: + // Exit signals are handled separately from this loop + // They get registered with this channel so that we can ignore such signals for short-running actions (e.g. `delete`) switch s { case unix.SIGCHLD: if err := reaper.Reap(); err != nil { @@ -89,6 +92,22 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si } } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { + ch := make(chan os.Signal, 32) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + for { + select { + case s := <-ch: + logger.WithField("signal", s).Debugf("Caught exit signal") + cancel() + return + case <-ctx.Done(): + return + } + } +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return fifo.OpenFifoDup2(ctx, "log", unix.O_WRONLY, 0700, int(os.Stderr.Fd())) } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 7339eb2a2e4..4b098ab1630 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,12 +18,12 @@ package shim import ( "context" + "errors" "io" "net" "os" "github.com/containerd/ttrpc" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -48,10 +46,13 @@ func serveListener(path string) (net.Listener, error) { return nil, errors.New("not supported") } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { return errors.New("not supported") } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index 52bfaa9ce71..28ac9d1e799 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -19,26 +19,32 @@ package shim import ( "bytes" "context" + "errors" "fmt" - "io/ioutil" "net" "os" - "os/exec" "path/filepath" "strings" - "sync" "time" "github.com/containerd/containerd/namespaces" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) -var runtimePaths sync.Map +type CommandConfig struct { + Runtime string + Address string + TTRPCAddress string + Path string + SchedCore bool + Args []string + Opts *types.Any +} // Command returns the shim command with the provided args and configuration -func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAddress, path string, opts *types.Any, cmdArgs ...string) (*exec.Cmd, error) { +func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -49,67 +55,23 @@ func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAdd } args := []string{ "-namespace", ns, - "-address", containerdAddress, + "-address", config.Address, "-publish-binary", self, } - args = append(args, cmdArgs...) - name := BinaryName(runtime) - if name == "" { - return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) - } - - var cmdPath string - cmdPathI, cmdPathFound := runtimePaths.Load(name) - if cmdPathFound { - cmdPath = cmdPathI.(string) - } else { - var lerr error - binaryPath := BinaryPath(runtime) - if _, serr := os.Stat(binaryPath); serr == nil { - cmdPath = binaryPath - } - - if cmdPath == "" { - if cmdPath, lerr = exec.LookPath(name); lerr != nil { - if eerr, ok := lerr.(*exec.Error); ok { - if eerr.Err == exec.ErrNotFound { - // LookPath only finds current directory matches based on - // the callers current directory but the caller is not - // likely in the same directory as the containerd - // executables. Instead match the calling binaries path - // (containerd) and see if they are side by side. If so - // execute the shim found there. 
- testPath := filepath.Join(filepath.Dir(self), name) - if _, serr := os.Stat(testPath); serr == nil { - cmdPath = testPath - } - if cmdPath == "" { - return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) - } - } - } - } - } - cmdPath, err = filepath.Abs(cmdPath) - if err != nil { - return nil, err - } - if cmdPathI, cmdPathFound = runtimePaths.LoadOrStore(name, cmdPath); cmdPathFound { - // We didn't store cmdPath we loaded an already cached value. Use it. - cmdPath = cmdPathI.(string) - } - } - - cmd := exec.Command(cmdPath, args...) - cmd.Dir = path + args = append(args, config.Args...) + cmd := exec.CommandContext(ctx, config.Runtime, args...) + cmd.Dir = config.Path cmd.Env = append( os.Environ(), "GOMAXPROCS=2", - fmt.Sprintf("%s=%s", ttrpcAddressEnv, containerdTTRPCAddress), + fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress), ) + if config.SchedCore { + cmd.Env = append(cmd.Env, "SCHED_CORE=1") + } cmd.SysProcAttr = getSysProcAttr() - if opts != nil { - d, err := proto.Marshal(opts) + if config.Opts != nil { + d, err := proto.Marshal(config.Opts) if err != nil { return nil, err } @@ -196,7 +158,7 @@ func ReadAddress(path string) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index f956b0986bd..4e2309a8069 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -29,10 +30,9 @@ import ( "syscall" "time" + "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/sys" - "github.com/pkg/errors" ) const ( @@ -53,16 +53,16 @@ func AdjustOOMScore(pid int) error { parent := os.Getppid() score, err := sys.GetOOMScoreAdj(parent) if err != nil { - return errors.Wrap(err, "get parent OOM score") + return fmt.Errorf("get parent OOM score: %w", err) } shimScore := score + 1 if err := sys.AdjustOOMScore(pid, shimScore); err != nil { - return errors.Wrap(err, "set shim OOM score") + return fmt.Errorf("set shim OOM score: %w", err) } return nil } -const socketRoot = "/run/containerd" +const socketRoot = defaults.DefaultStateDir // SocketAddress returns a socket address func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { @@ -76,7 +76,7 @@ func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { // AnonDialer returns a dialer for a socket func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - return dialer.Dialer(socket(address).path(), timeout) + return net.DialTimeout("unix", socket(address).path(), timeout) } // AnonReconnectDialer returns a dialer for an existing socket on reconnection @@ -90,19 +90,25 @@ func NewSocket(address string) (*net.UnixListener, error) { sock = socket(address) path = sock.path() ) - if !sock.isAbstract() { + + isAbstract := sock.isAbstract() + + if !isAbstract { if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, errors.Wrapf(err, "%s", path) + return nil, fmt.Errorf("%s: %w", path, err) } } l, err := net.Listen("unix", path) if err != nil { return nil, err } - if err := os.Chmod(path, 0600); err 
!= nil { - os.Remove(sock.path()) - l.Close() - return nil, err + + if !isAbstract { + if err := os.Chmod(path, 0600); err != nil { + os.Remove(sock.path()) + l.Close() + return nil, err + } } return l.(*net.UnixListener), nil } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go index 325c290043f..b9042841b1a 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -18,13 +18,13 @@ package shim import ( "context" + "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) const shimBinaryFormat = "containerd-shim-%s-%s.exe" @@ -40,9 +40,9 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error c, err := winio.DialPipeContext(ctx, address) if os.IsNotExist(err) { - return nil, errors.Wrap(os.ErrNotExist, "npipe not found on reconnect") + return nil, fmt.Errorf("npipe not found on reconnect: %w", os.ErrNotExist) } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } else if err != nil { return nil, err } @@ -65,14 +65,14 @@ func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { if os.IsNotExist(err) { select { case <-serveTimer.C: - return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + return nil, fmt.Errorf("pipe not found before timeout: %w", os.ErrNotExist) default: // Wait 10ms for the shim to serve and try again. time.Sleep(10 * time.Millisecond) continue } } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } return nil, err } diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go index 28d6c2cabc6..73a57013ff1 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/sys/epoll.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go index db3cf702f49..a71a9cd7e93 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/sys/fds.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin /* @@ -19,14 +20,14 @@ package sys import ( - "io/ioutil" + "os" "path/filepath" "strconv" ) // GetOpenFds returns the number of open fds for the process provided by pid func GetOpenFds(pid int) (int, error) { - dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) if err != nil { return -1, err } diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index d8329af9fbc..805a7a736fa 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 
a9198ef399a..87ebacc2006 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,6 +17,7 @@ package sys import ( + "fmt" "os" "path/filepath" "regexp" @@ -29,7 +28,6 @@ import ( "unsafe" "github.com/Microsoft/hcsshim" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -270,7 +268,7 @@ func ForceRemoveAll(path string) error { snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { if err := cleanupWCOWLayers(snapshotDir); err != nil { - return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) } } @@ -280,12 +278,22 @@ func ForceRemoveAll(path string) error { func cleanupWCOWLayers(root string) error { // See snapshots/windows/windows.go getSnapshotDir() var layerNums []int + var rmLayerNums []int if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if path != root && info.IsDir() { - if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { - layerNums = append(layerNums, layerNum) + name := filepath.Base(path) + if strings.HasPrefix(name, "rm-") { + layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) + if err != nil { + return err + } + rmLayerNums = append(rmLayerNums, layerNum) } else { - return err + layerNum, err := strconv.Atoi(name) + if err != nil { + return err + } + layerNums = append(layerNums, layerNum) } return filepath.SkipDir } @@ -295,8 +303,14 @@ func cleanupWCOWLayers(root string) error { return err } - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) + for _, rmLayerNum := range rmLayerNums { + if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { + return err + } + } + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) for _, layerNum := range layerNums { if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { return err @@ -311,19 +325,20 @@ func cleanupWCOWLayer(layerPath string) error { HomeDir: filepath.Dir(layerPath), } - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. + // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. 
if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST { - return errors.Wrapf(err, "failed to unprepare %s", layerPath) + if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { + return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) } } if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to deactivate %s", layerPath) + return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) } if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to destroy %s", layerPath) + return fmt.Errorf("failed to destroy %s: %w", layerPath, err) } return nil diff --git a/vendor/github.com/containerd/containerd/sys/mount_linux.go b/vendor/github.com/containerd/containerd/sys/mount_linux.go deleted file mode 100644 index a21045529ae..00000000000 --- a/vendor/github.com/containerd/containerd/sys/mount_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "runtime" - "syscall" - "unsafe" - - "github.com/containerd/containerd/log" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// FMountat performs mount from the provided directory. 
-func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { - var ( - sourceP, targetP, fstypeP, dataP *byte - pid uintptr - err error - errno, status syscall.Errno - ) - - sourceP, err = syscall.BytePtrFromString(source) - if err != nil { - return err - } - - targetP, err = syscall.BytePtrFromString(target) - if err != nil { - return err - } - - fstypeP, err = syscall.BytePtrFromString(fstype) - if err != nil { - return err - } - - if data != "" { - dataP, err = syscall.BytePtrFromString(data) - if err != nil { - return err - } - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return errors.Wrap(err, "failed to open pipe") - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - - pid, errno = forkAndMountat(dirfd, - uintptr(unsafe.Pointer(sourceP)), - uintptr(unsafe.Pointer(targetP)), - uintptr(unsafe.Pointer(fstypeP)), - flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) - - if errno != 0 { - return errors.Wrap(errno, "failed to fork thread") - } - - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } - - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() - - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) - if errno != 0 { - return errors.Wrap(errno, "failed to read pipe") - } - - if status != 0 { - return errors.Wrap(status, "failed to mount") - } - - return nil -} - -// forkAndMountat will fork thread, change working dir and mount. -// -// precondition: the runtime OS thread must be locked. -func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - - // block signal during clone - beforeFork() - - // the cloned thread shares the open file descriptor, but the thread - // never be reused by runtime. - pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) - if errno != 0 || pid != 0 { - // restore all signals - afterFork() - return - } - - // restore all signals - afterForkInChild() - - // change working dir - _, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0) - if errno != 0 { - goto childerr - } - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) - -childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go index 82a347c6f72..bb2a3eafb4f 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_linux.go +++ b/vendor/github.com/containerd/containerd/sys/oom_linux.go @@ -18,7 +18,6 @@ package sys import ( "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -69,7 +68,7 @@ func SetOOMScore(pid, score int) error { // no oom score is set, or a sore is set to 0. 
func GetOOMScoreAdj(pid int) (int, error) { path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go index f5d7e9786b8..fa0db5a10e0 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 0033178dfa7..6c4f13b9088 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,12 +20,14 @@ package reaper import ( - "os/exec" + "errors" + "fmt" "sync" + "syscall" "time" runc "github.com/containerd/go-runc" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -115,6 +118,38 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { return -1, ErrNoSuchProcess } +// WaitTimeout is used to skip the blocked command and kill the left process. +func (m *Monitor) WaitTimeout(c *exec.Cmd, ec chan runc.Exit, timeout time.Duration) (int, error) { + type exitStatusWrapper struct { + status int + err error + } + + // capacity can make sure that the following goroutine will not be + // blocked if there is no receiver when timeout. + waitCh := make(chan *exitStatusWrapper, 1) + go func() { + defer close(waitCh) + + status, err := m.Wait(c, ec) + waitCh <- &exitStatusWrapper{ + status: status, + err: err, + } + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + select { + case <-timer.C: + syscall.Kill(c.Process.Pid, syscall.SIGKILL) + return 0, fmt.Errorf("timeout %v for cmd(pid=%d): %s, %s", timeout, c.Process.Pid, c.Path, c.Args) + case res := <-waitCh: + return res.status, res.err + } +} + // Subscribe to process exit changes func (m *Monitor) Subscribe() chan runc.Exit { c := make(chan runc.Exit, bufferSize) diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index b67cc1fa3a7..367e19cad84 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,11 +20,11 @@ package sys import ( + "fmt" "net" "os" "path/filepath" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -31,7 +32,7 @@ import ( func CreateUnixSocket(path string) (net.Listener, error) { // BSDs have a 104 limit if len(path) > 104 { - return nil, errors.Errorf("%q: unix socket path too long (> 104)", path) + return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) } if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go index 3ee7679b49b..1ae12bc5114 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
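The new Monitor.WaitTimeout in reaper_unix.go above wraps a potentially blocked Wait in a goroutine and SIGKILLs the process when the timer fires; the channel's capacity of one keeps that goroutine from leaking if the timeout branch wins. A stripped-down sketch of the same idea against plain os/exec follows; waitTimeout is an illustrative stand-in without the runc exit-channel plumbing.

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// waitTimeout waits for cmd to exit, but gives up after timeout and
// SIGKILLs the process so a wedged command cannot block the caller.
func waitTimeout(cmd *exec.Cmd, timeout time.Duration) error {
	// Capacity 1 lets the goroutine always deliver its result and exit,
	// even when the caller has already returned on the timeout branch.
	waitCh := make(chan error, 1)
	go func() { waitCh <- cmd.Wait() }()

	timer := time.NewTimer(timeout)
	defer timer.Stop()

	select {
	case <-timer.C:
		_ = syscall.Kill(cmd.Process.Pid, syscall.SIGKILL)
		return fmt.Errorf("timeout %v for cmd(pid=%d): %s", timeout, cmd.Process.Pid, cmd.Path)
	case err := <-waitCh:
		return err
	}
}

func main() {
	cmd := exec.Command("sleep", "10")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(waitTimeout(cmd, time.Second)) // times out after ~1s
}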
diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go deleted file mode 100644 index 4f03cd6cb0f..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_bsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go b/vendor/github.com/containerd/containerd/sys/stat_openbsd.go deleted file mode 100644 index ec3b9df69aa..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/sys/stat_unix.go deleted file mode 100644 index 21a666dff86..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build linux solaris - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b8200307f3a..b5e00c5d820 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.5.8+unknown" + Version = "1.6.4+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 708b2668990..0da3efe4c21 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,9 +26,10 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" + "errors" "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -38,7 +39,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -48,6 +48,7 @@ type options struct { prioritizedFiles []string missedPrioritizedFiles *[]string compression Compression + ctx context.Context } type Option func(o *options) error @@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option { } } +// WithContext specifies a context that can be used for clean canceleration. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + // Blob is an eStargz blob. 
type Blob struct { io.ReadCloser @@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { - rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err) + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -307,7 +333,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri // Import tar file. intar, err := importTar(in) if err != nil { - return nil, errors.Wrap(err, "failed to sort") + return nil, fmt.Errorf("failed to sort: %w", err) } // Sort the tar file respecting to the prioritized files list. @@ -318,7 +344,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri *missedPrioritized = append(*missedPrioritized, l) continue // allow not found } - return nil, errors.Wrap(err, "failed to sort tar entries") + return nil, fmt.Errorf("failed to sort tar entries: %w", err) } } if len(prioritized) == 0 { @@ -371,7 +397,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} pw, err := newCountReader(in) if err != nil { - return nil, errors.Wrap(err, "failed to make position watcher") + return nil, fmt.Errorf("failed to make position watcher: %w", err) } tr := tar.NewReader(pw) @@ -383,7 +409,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { if err == io.EOF { break } else { - return nil, errors.Wrap(err, "failed to parse tar file") + return nil, fmt.Errorf("failed to parse tar file, %w", err) } } switch cleanEntryName(h.Name) { @@ -420,7 +446,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error { _, okIn := in.get(name) _, okOut := out.get(name) if !okIn && !okOut { - return errors.Wrapf(errNotFound, "file: %q", name) + return fmt.Errorf("file: %q: %w", name, errNotFound) } parent, _ := path.Split(strings.TrimSuffix(name, "/")) @@ -506,12 +532,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index e56319545e6..921e59ec6ef 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -27,10 +27,10 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + 
"errors" "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -40,7 +40,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/vbatts/tar-split/archive/tar" ) @@ -107,7 +106,7 @@ type Telemetry struct { } // Open opens a stargz file for reading. -// The behaviour is configurable using options. +// The behavior is configurable using options. // // Note that each entry name is normalized as the path that is relative to root. func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { @@ -385,8 +384,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.Digest != "" { d, err := digest.Parse(e.Digest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse regular file digest %q", e.Digest) + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) } regDigestMap[e.Offset] = d } else { @@ -401,8 +399,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.ChunkDigest != "" { d, err := digest.Parse(e.ChunkDigest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse chunk digest %q", e.ChunkDigest) + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) } chunkDigestMap[e.Offset] = d } else { @@ -581,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) } return io.ReadFull(dr, p) @@ -647,7 +644,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { } blobPayloadSize, _, _, err := c.ParseFooter(footer) if err != nil { - return nil, errors.Wrapf(err, "failed to parse footer") + return nil, fmt.Errorf("failed to parse footer: %w", err) } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } @@ -935,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 7330849cb89..591d7a62e11 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -34,7 +34,6 @@ import ( "strconv" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type gzipCompression struct { @@ -150,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t } tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } @@ -179,7 +178,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } zr, err := gzip.NewReader(bytes.NewReader(p)) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader") + return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() extra := zr.Header.Extra @@ -191,7 +190,7 @@ func (gz 
*LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 9224e456dde..8f27dfb3ea2 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -28,9 +28,9 @@ import ( "compress/gzip" "crypto/sha256" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "os" "reflect" "sort" @@ -41,7 +41,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // TestingController is Compression with some helper methods necessary for testing. @@ -287,11 +286,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -1062,18 +1061,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT fSize := controller.FooterSize() footer := make([]byte, fSize) if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { - return nil, 0, errors.Wrap(err, "error reading footer") + return nil, 0, fmt.Errorf("error reading footer: %w", err) } _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) if err != nil { - return nil, 0, errors.Wrapf(err, "failed to parse footer") + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) } // Decode the TOC JSON tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { - return nil, 0, errors.Wrap(err, "failed to parse TOC") + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) } return decodedJTOC, tocOffset, nil } diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 8bad5b1115e..ccf7be53a89 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -6,7 +6,7 @@ env: #### Global variables used for all tasks #### # Name of the ultimate destination branch for this CI run, PR or post-merge. 
- DEST_BRANCH: "release-1.23" + DEST_BRANCH: "main" GOPATH: "/var/tmp/go" GOSRC: "${GOPATH}/src/github.com/containers/buildah" # Overrides default location (/tmp/cirrus) for repo clone @@ -19,22 +19,22 @@ env: CIRRUS_CLONE_DEPTH: 50 # Unless set by in_podman.sh, default to operating outside of a podman container IN_PODMAN: 'false' + # root or rootless + PRIV_NAME: root #### #### Cache-image names to test with #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" - UBUNTU_NAME: "ubuntu-2104" - PRIOR_UBUNTU_NAME: "ubuntu-2010" + FEDORA_NAME: "fedora-36" + PRIOR_FEDORA_NAME: "fedora-35" + UBUNTU_NAME: "ubuntu-2110" - IMAGE_SUFFIX: "c6248193773010944" + IMAGE_SUFFIX: "c4955393725038592" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}" IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" @@ -76,7 +76,6 @@ meta_task: ${FEDORA_CACHE_IMAGE_NAME} ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} - ${PRIOR_UBUNTU_CACHE_IMAGE_NAME} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14] @@ -131,13 +130,35 @@ vendor_task: - './hack/tree_status.sh' +# Confirm cross-compile ALL architectures on a Mac OS-X VM. +cross_build_task: + name: "Cross Compile" + alias: cross_build + only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' + + osx_instance: + image: 'big-sur-base' + + script: + - brew update + - brew install go + - brew install go-md2man + - brew install gpgme + - go version + - make cross CGO_ENABLED=0 + + binary_artifacts: + path: ./bin/* + + unit_task: name: 'Unit tests w/ $STORAGE_DRIVER' alias: unit - only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' - depends_on: + only_if: *not_docs + depends_on: &smoke_vendor_cross - smoke - vendor + - cross_build timeout_in: 1h @@ -159,8 +180,7 @@ conformance_task: name: 'Build Conformance w/ $STORAGE_DRIVER' alias: conformance only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross gce_instance: image_name: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -177,85 +197,11 @@ conformance_task: conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}' -# Confirm cross-compile ALL architectures on a Mac OS-X VM. 
-cross_build_task: - name: "Cross Compile" - alias: cross_build - only_if: *not_docs - depends_on: - - unit - - osx_instance: - image: 'big-sur-base' - - script: - - brew update - - brew install go - - brew install go-md2man - - brew install gpgme - - go version - - make cross CGO_ENABLED=0 - - binary_artifacts: - path: ./bin/* - - -static_build_task: - name: "Static Build" - alias: static_build - only_if: *not_docs - depends_on: - - unit - - gce_instance: - image_name: "${FEDORA_CACHE_IMAGE_NAME}" - cpu: 8 - memory: 12 - disk: 200 - - env: - NIX_FQIN: "docker.io/nixos/nix:latest" - - init_script: | - set -ex - setenforce 0 - growpart /dev/sda 1 || true - resize2fs /dev/sda1 || true - yum -y install podman - - nix_cache: - folder: '.cache' - fingerprint_script: cat nix/* - - build_script: | - set -ex - mkdir -p .cache - mv .cache /nix - if [[ -z $(ls -A /nix) ]]; then - podman run --rm --privileged -i -v /:/mnt \ - $NIX_FQIN \ - cp -rfT /nix /mnt/nix - fi - podman run --rm --privileged -i -v /nix:/nix \ - -v ${PWD}:${PWD} -w ${PWD} \ - $NIX_FQIN \ - nix --print-build-logs --option cores 8 \ - --option max-jobs 8 build --file nix/ - - binaries_artifacts: - path: "result/bin/buildah" - - save_cache_script: | - mv /nix .cache - chown -Rf $(whoami) .cache - - integration_task: name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER" alias: integration only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross matrix: # VFS @@ -271,10 +217,6 @@ integration_task: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' - - env: - DISTRO_NV: "${PRIOR_UBUNTU_NAME}" - IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'vfs' # OVERLAY - env: DISTRO_NV: "${FEDORA_NAME}" @@ -288,10 +230,6 @@ integration_task: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' - - env: - DISTRO_NV: "${PRIOR_UBUNTU_NAME}" - IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' gce_instance: image_name: "$IMAGE_NAME" @@ -314,13 +252,50 @@ integration_task: package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages' golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang' +integration_rootless_task: + name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER" + alias: integration_rootless + only_if: *not_docs + depends_on: *smoke_vendor_cross + + matrix: + # Running rootless tests on overlay + # OVERLAY + - env: + DISTRO_NV: "${FEDORA_NAME}" + IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + - env: + DISTRO_NV: "${PRIOR_FEDORA_NAME}" + IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + - env: + DISTRO_NV: "${UBUNTU_NAME}" + IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + + gce_instance: + image_name: "$IMAGE_NAME" + + # Separate scripts for separate outputs, makes debugging easier. + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}' + + binary_artifacts: + path: ./bin/* + + always: + <<: *standardlogs in_podman_task: name: "Containerized Integration" alias: in_podman only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross env: # This is key, cause the scripts to re-execute themselves inside a container. 
@@ -356,7 +331,6 @@ success_task: - cross_build - integration - in_podman - - static_build container: image: "quay.io/libpod/alpine:latest" diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore index d98205316c2..939ce6ef52d 100644 --- a/vendor/github.com/containers/buildah/.gitignore +++ b/vendor/github.com/containers/buildah/.gitignore @@ -1,11 +1,12 @@ docs/buildah*.1 +docs/*.5 /bin /buildah /imgtype /build/ -tests/tools/build +/tests/tools/build Dockerfile* !/tests/bud/*/Dockerfile* !/tests/conformance/**/Dockerfile* *.swp -result +/result/ diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml index 0c7e3100793..af0b10c76b2 100644 --- a/vendor/github.com/containers/buildah/.golangci.yml +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -7,18 +7,7 @@ run: # Don't exceed number of threads available when running under CI concurrency: 4 linters: - enable-all: true - disable: - # All these break for one reason or another - - dupl - - funlen - - gochecknoglobals - - gochecknoinits - - goconst - - gocritic - - gocyclo - - gosec - - lll - - maligned - - prealloc - - scopelint + enable: + - revive + - unconvert + - unparam diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 685c4f4d36a..36fa66893bb 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,10 +2,318 @@ # Changelog -## v1.23.1 (2021-09-27) +## v1.26.1 (2022-05-04) - Vendor containers/common v0.44.2 - post-1.23 branch fixups + Make `buildah build --label foo` create an empty "foo" label again + Bump to v1.27.0-dev + +## v1.26.0 (2022-05-04) + + imagebuildah,build: move deepcopy of args before we spawn goroutine + Vendor in containers/storage v1.40.2 + buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated + help output: get more consistent about option usage text + Handle OS version and features flags + buildah build: --annotation and --label should remove values + buildah build: add a --env + buildah: deep copy options.Args before performing concurrent build/stage + test: inline platform and builtinargs behaviour + vendor: bump imagebuilder to master/009dbc6 + build: automatically set correct TARGETPLATFORM where expected + build(deps): bump github.com/fsouza/go-dockerclient + Vendor in containers/(common, storage, image) + imagebuildah, executor: process arg variables while populating baseMap + buildkit: add support for custom build output with --output + Cirrus: Update CI VMs to F36 + fix staticcheck linter warning for deprecated function + Fix docs build on FreeBSD + build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0 + copier.unwrapError(): update for Go 1.16 + copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit + copier.Put(): write to read-only directories + build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools + Rename $TESTSDIR (the plural one), step 4 of 3 + Rename $TESTSDIR (the plural one), step 3 of 3 + Rename $TESTSDIR (the plural one), step 2 of 3 + Rename $TESTSDIR (the plural one), step 1 of 3 + build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3 + Ed's periodic test cleanup + using consistent lowercase 'invalid' word in returned err msg + Update vendor of containers/(common,storage,image) + use etchosts package from c/common + run: set actual hostname in /etc/hostname 
to match docker parity + update c/common to latest main + Update vendor of containers/(common,storage,image) + Stop littering + manifest-create: allow creating manifest list from local image + Update vendor of storage,common,image + Bump golang.org/x/crypto to 7b82a4e + Initialize network backend before first pull + oci spec: change special mount points for namespaces + tests/helpers.bash: assert handle corner cases correctly + buildah: actually use containers.conf settings + integration tests: learn to start a dummy registry + Fix error check to work on Podman + buildah build should accept at most one arg + tests: reduce concurrency for flaky bud-multiple-platform-no-run + vendor in latest containers/common,image,storage + manifest-add: allow override arch,variant while adding image + Remove a stray `\` from .containerenv + Vendor in latest opencontainers/selinux v1.10.1 + build, commit: allow removing default identity labels + Create shorter names for containers based on image IDs + test: skip rootless on cgroupv2 in root env + fix hang when oci runtime fails + Set permissions for GitHub actions + copier test: use correct UID/GID in test archives + run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM + Bump back to v1.26.0-dev + build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1 + Included the URL to check the SHA + +## v1.25.1 (2022-03-30) + + buildah: create WORKDIR with USER permissions + vendor: update github.com/openshift/imagebuilder + copier: attempt to open the dir before adding it + Updated dependabot to get updates for GitHub actions. + Switch most calls to filepath.Walk to filepath.WalkDir + build: allow --no-cache and --layers so build cache can be overrided + build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + Bump to v1.26.0-dev + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + +## v1.25.0 (2022-03-25) + + install: drop RHEL/CentOS 7 doc + build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 + Bump c/storage to v1.39.0 in main + Add a test for CVE-2022-27651 + build(deps): bump github.com/docker/docker + Bump github.com/prometheus/client_golang to v1.11.1 + [CI:DOCS] man pages: sort flags, and keep them that way + build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2 + Don't pollute + network setup: increase timeout to 4 minutes + do not set the inheritable capabilities + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3 + parse: convert exposed GetVolumes to internal only + buildkit: mount=type=cache support locking external cache store + .in support: improve error message when cpp is not installed + buildah image: install cpp + build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 + build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 + build(deps): bump github.com/docker/docker + Add --no-hosts flag to eliminate use of /etc/hosts within containers + test: remove skips for rootless users + test: unshare mount/umount if test is_rootless + tests/copy: read correct containers.conf + build(deps): bump github.com/docker/distribution + cirrus: add seperate task and matrix for rootless + tests: skip tests for rootless which need unshare + buildah: test rootless integration + vendor: bump c/storage to main/93ce26691863 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10 + tests/copy: initialize the network, too + [CI:DOCS] remove references to Kubic for 
CentOS and Ubuntu + build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1 + use c/image/pkg/blobcache + vendor c/image/v5@v5.20.0 + add: ensure the context directory is an absolute path + executor: docker builds must inherit healthconfig from base if any + docs: Remove Containerfile and containeringore + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9 + helpers.bash: Use correct syntax + speed up combination-namespaces test + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + Bump back to 1.25.0-dev + build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0 + +## v1.24.2 (2022-02-16) + + Increase subuid/subgid to 65535 + history: only add proxy vars to history if specified + run_linux: use --systemd-cgroup + buildah: new global option --cgroup-manager + Makefile: build with systemd when available + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + Bump c/common to v0.47.4 + Cirrus: Use updated VM images + conformance: add a few "replace-directory-with-symlink" tests + Bump back to v1.25.0-dev + +## v1.24.1 (2022-02-03) + + executor: Add support for inline --platform within Dockerfile + caps: fix buildah run --cap-add=all + Update vendor of openshift/imagebuilder + Bump version of containers/image and containers/common + Update vendor of containers/common + System tests: fix accidental vandalism of source dir + build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + imagebuildah.BuildDockerfiles(): create the jobs semaphore + build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + overlay: always honor mountProgram + overlay: move mount program invocation to separate function + overlay: move mount program lookup to separate function + Bump to v1.25.0-dev [NO TESTS NEEDED] + +## v1.24.0 (2022-01-26) + + Update vendor of containers/common + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + Github-workflow: Report both failures and errors. 
+ build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + Update docs/buildah-build.1.md + [CI:DOCS] Fix typos and improve language + buildah bud --network add support for custom networks + Make pull commands be consistent + docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + Vendor in latest containers/image + Run codespell on code + .github/dependabot.yml: add tests/tools go.mod + CI: rm git-validation, add GHA job to validate PRs + tests/tools: bump go-md2man to v2.0.1 + tests/tools/Makefile: simplify + tests/tools: bump onsi/ginkgo to v1.16.5 + vendor: bump c/common and others + mount: add support for custom upper and workdir with overlay mounts + linux: fix lookup for runtime + overlay: add MountWithOptions to API which extends support for advanced overlay + Allow processing of SystemContext from FlagSet + .golangci.yml: enable unparam linter + util/resolveName: rm bool return + tests/tools: bump golangci-lint + .gitignore: fixups + all: fix capabilities.NewPid deprecation warnings + bind/mount.go: fix linter comment + all: fix gosimple warning S1039 + tests/e2e/buildah_suite_test.go: fix gosimple warnings + imagebuildah/executor.go: fix gosimple warning + util.go: fix gosimple warning + build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + Enable git-daemon tests + Allow processing of id options from FlagSet + Cirrus: Re-order tasks for more parallelism + Cirrus: Freshen VM images + Fix platform handling for empty os/arch values + Allow processing of network options from FlagSet + Fix permissions on secrets directory + Update containers/image and containers/common + bud.bats: use a local git daemon for the git protocol test + Allow processing of common options from FlagSet + Cirrus: Run int. tests in parallel with unit + vendor c/common + Fix default CNI paths + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + multi-stage: enable mounting stages across each other with selinux enabled + executor: Share selinux label of first stage with other stages in a build + buildkit: add from field to bind and cache mounts so images can be used as source + Use config.ProxyEnv from containers/common + use libnetwork from c/common for networking + setup the netns in the buildah parent process + build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + build: fix libsubid test + Allow callers to replace the ContainerSuffix + parse: allow parsing anomaly non-human value for memory control group + .cirrus: remove static_build from ci + stage_executor: re-use all possible layers from cache for squashed builds + build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + Allow rootless buildah to set resource limits on cgroup V2 + build(deps): bump github.com/docker/docker + tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + Wire logger through to config + copier.Put: check for is-not-a-directory using lstat, not stat + Turn on rootless cgroupv2 tests + Grab all of the containers.conf settings for namespaces. 
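The "copier.Put: check for is-not-a-directory using lstat, not stat" entry above turns on a detail worth spelling out: os.Stat follows symlinks, while os.Lstat reports the link itself, so only Lstat can tell that a path is a symlink rather than the directory it points at. A self-contained illustration (not buildah code):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "lstat-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "realdir")
	link := filepath.Join(dir, "link")
	if err := os.Mkdir(target, 0o755); err != nil {
		panic(err)
	}
	if err := os.Symlink(target, link); err != nil {
		panic(err)
	}

	st, _ := os.Stat(link)   // follows the link: reports the target directory
	lst, _ := os.Lstat(link) // reports the symlink itself
	fmt.Println("stat thinks it's a directory: ", st.IsDir())  // true
	fmt.Println("lstat thinks it's a directory:", lst.IsDir()) // false
}
```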
+ image: set MediaType in OCI manifests + copier: RemoveAll possibly-directories + Simple README fix + images: accept multiple filter with logical AND + build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + Update vendor of container/storage + build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + Make LocalIP public function so Podman can use it + Fix UnsetEnv for buildah bud + Tests should rely only on static/unchanging images + run: ensure that stdio pipes are labeled correctly + build(deps): bump github.com/docker/docker + Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + chroot: don't use the generated default seccomp filter for unit tests + build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + ssh-agent: Increase timeout before we explicitly close connection + docs/tutorials: update + Clarify that manifest defaults to localhost as the registry name + "config": remove a stray bit of debug output + "commit": fix a flag typo + Fix an error message: unlocking vs locking + Expand the godoc for CommonBuildOptions.Secrets + chroot: accept an "rw" option + Add --unsetenv option to buildah commit and build + define.TempDirForURL(): show CombinedOutput when a command fails + config: support the variant field + rootless: do not bind mount /sys if not needed + Fix tutorial to specify command on buildah run line + build: history should not contain ARG values + docs: Use guaranteed path for go-md2man + run: honor --network=none from builder if nothing specified + networkpolicy: Should be enabled instead of default when explicitly set + Add support for env var secret sources + build(deps): bump github.com/docker/docker + fix: another non-portable shebang + Rootless containers users should use additional groups + Support overlayfs path contains colon + Report ignorefile location when no content added + Add support for host.containers.internal in the /etc/hosts + build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + imagebuildah: fix nil deref + buildkit: add support for mount=type=cache + Default secret mode to 400 + [CI:DOCS] Include manifest example usage + docs: update buildah-from, buildah-pull 'platform' option compatibility notes + docs: update buildah-build 'platform' option compatibility notes + De-dockerize the man page as much as possible + [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + docs: Fix and Update Containerfile man page with supported mount types + mount: add tmpcopyup to tmpfs mount option + buildkit: Add support for --mount=type=tmpfs + build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + Fix command doc links in README.md + build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + build: Add support for buildkit like --mount=type=bind + Bump containerd to v1.5.7 + build(deps): bump github.com/docker/docker + tests: stop pulling php, composer + Fix .containerignore link file + Cirrus: Fix defunct package metadata breaking cache + build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + buildah build: add --all-platforms + Add man page for Containerfile and .containerignore + Plumb the remote logger throughout Buildah + Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + Run: Cleanup run directory after every RUN step + build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0 + Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + Makefile:
check for `-race` using `-mod=vendor` + imagebuildah: fix an attempt to write to a nil map + push: support to specify the compression format + conformance: allow test cases to specify dockerUseBuildKit + build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + unmarshalConvertedConfig(): handle zstd compression + tests/copy/copy: wire up compression options + Update to github.com/vbauerster/mpb v7.1.5 + Add flouthoc to OWNERS + build: Add additional step nodes when labels are modified + Makefile: turn on race detection whenever it's available + conformance: add more tests for exclusion short-circuiting + Update VM Images + Drop prior-ubuntu testing + Bump to v1.24.0-dev ## v1.23.0 (2021-09-13) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 27e4ade6d67..95ce322b81a 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -3,7 +3,7 @@ export GOPROXY=https://proxy.golang.org APPARMORTAG := $(shell hack/apparmor_tag.sh) STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) -TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) +TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) BUILDTAGS += $(TAGS) PREFIX := /usr/local BINDIR := $(PREFIX)/bin @@ -12,6 +12,8 @@ BUILDFLAGS := -tags "$(BUILDTAGS)" BUILDAH := buildah GO := go +GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi) +GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi) GO110 := 1.10 GOVERSION := $(findstring $(GO110),$(shell go version)) # test for go module support @@ -22,6 +24,7 @@ else export GO_BUILD=$(GO) build export GO_TEST=$(GO) test endif +RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race) GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed")) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) @@ -33,8 +36,8 @@ RUNC_COMMIT := v1.0.0-rc8 LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= -BUILDAH_LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go docker/*.go manifests/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= @@ -65,14 +68,15 @@ static: cp -rfp ./result/bin/* ./bin/ bin/buildah: $(SOURCES) cmd/buildah/*.go - $(GO_BUILD) $(BUILDAH_LDFLAGS) -gcflags "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah + $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ 
$(BUILDFLAGS) ./cmd/buildah .PHONY: buildah buildah: bin/buildah -LINUX_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^linux/))) -DARWIN_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^darwin/))) -WINDOWS_CROSS_TARGETS = $(addsuffix .exe,$(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^windows/)))) +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) +LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) +DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) +WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) .PHONY: cross cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) @@ -107,7 +111,6 @@ codespell: .PHONY: validate validate: install.tools ./tests/validate/whitespace.sh - ./tests/validate/git-validation.sh ./hack/xref-helpmsgs-manpages ./tests/validate/pr-should-include-tests ./tests/validate/buildahimages-are-sane @@ -164,14 +167,14 @@ test-integration: install.tools cd tests; ./test_runner.sh tests/testreport/testreport: tests/testreport/testreport.go - $(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go + $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go .PHONY: test-unit test-unit: tests/testreport/testreport - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... 
| grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m tmp=$(shell mktemp -d) ; \ mkdir -p $$tmp/root $$tmp/runroot; \ - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS index 1e070e396e6..2eb9ea84c3e 100644 --- a/vendor/github.com/containers/buildah/OWNERS +++ b/vendor/github.com/containers/buildah/OWNERS @@ -2,6 +2,7 @@ approvers: - TomSweeneyRedHat - ashley-cui - cevich + - flouthoc - giuseppe - lsm5 - nalind diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index 95c8a9a7b35..512d3f87368 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -77,7 +77,9 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh): ```bash $ cat > lighttpd.sh <<"EOF" -#!/usr/bin/env bash -x +#!/usr/bin/env bash + +set -x ctr1=$(buildah from "${1:-fedora}") @@ -103,27 +105,27 @@ $ sudo ./lighttpd.sh ## Commands | Command | Description | | ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | -| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. | -| [buildah-build(1)](/docs/buildah-build.md) | Build an image using instructions from Containerfiles or Dockerfiles. | -| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. | -| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. | -| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. | -| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. | -| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | -| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. | -| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. | -| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. | -| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. | -| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. | -| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. | -| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. | -| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. | -| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. | -| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. 
| -| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. | -| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. | -| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. | -| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information | +| [buildah-add(1)](/docs/buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. | +| [buildah-build(1)](/docs/buildah-build.1.md) | Build an image using instructions from Containerfiles or Dockerfiles. | +| [buildah-commit(1)](/docs/buildah-commit.1.md) | Create an image from a working container. | +| [buildah-config(1)](/docs/buildah-config.1.md) | Update image configuration settings. | +| [buildah-containers(1)](/docs/buildah-containers.1.md) | List the working containers and their base images. | +| [buildah-copy(1)](/docs/buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. | +| [buildah-from(1)](/docs/buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | +| [buildah-images(1)](/docs/buildah-images.1.md) | List images in local storage. | +| [buildah-info(1)](/docs/buildah-info.1.md) | Display Buildah system information. | +| [buildah-inspect(1)](/docs/buildah-inspect.1.md) | Inspects the configuration of a container or image. | +| [buildah-mount(1)](/docs/buildah-mount.1.md) | Mount the working container's root filesystem. | +| [buildah-pull(1)](/docs/buildah-pull.1.md) | Pull an image from the specified location. | +| [buildah-push(1)](/docs/buildah-push.1.md) | Push an image from local storage to elsewhere. | +| [buildah-rename(1)](/docs/buildah-rename.1.md) | Rename a local container. | +| [buildah-rm(1)](/docs/buildah-rm.1.md) | Removes one or more working containers. | +| [buildah-rmi(1)](/docs/buildah-rmi.1.md) | Removes one or more images. | +| [buildah-run(1)](/docs/buildah-run.1.md) | Run a command inside of the container. | +| [buildah-tag(1)](/docs/buildah-tag.1.md) | Add an additional name to a local image. | +| [buildah-umount(1)](/docs/buildah-umount.1.md) | Unmount a working container's root file system. | +| [buildah-unshare(1)](/docs/buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. | +| [buildah-version(1)](/docs/buildah-version.1.md) | Display the Buildah Version Information | **Future goals include:** * more CI tests diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index f17e3a9c943..8aa53a2920f 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -47,8 +47,10 @@ type AddAndCopyOptions struct { // If the sources include directory trees, Hasher will be passed // tar-format archives of the directory trees. Hasher io.Writer - // Excludes is the contents of the .dockerignore file. + // Excludes is the contents of the .containerignore file. Excludes []string + // IgnoreFile is the path to the .containerignore file. + IgnoreFile string // ContextDir is the base directory for content being copied and // Excludes patterns. 
ContextDir string @@ -199,6 +201,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if err != nil { return errors.Wrapf(err, "error determining current working directory") } + } else { + if !filepath.IsAbs(options.ContextDir) { + contextDir, err = filepath.Abs(options.ContextDir) + if err != nil { + return errors.Wrapf(err, "error converting context directory path %q to an absolute path", options.ContextDir) + } + } } // Figure out what sorts of sources we have. @@ -564,7 +573,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } } if itemsCopied == 0 { - return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out)", localSourceStat.Glob, len(localSourceStat.Globbed)) + excludesFile := "" + if options.IgnoreFile != "" { + excludesFile = " using " + options.IgnoreFile + } + return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile) } } return nil @@ -642,3 +655,37 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3 } return owner.UID, owner.GID, nil } + +// EnsureContainerPathAs creates the specified directory owned by USER +// with the file mode set to MODE. +func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return err + } + defer func() { + if err2 := b.Unmount(); err2 != nil { + logrus.Errorf("error unmounting container: %v", err2) + } + }() + + uid, gid := uint32(0), uint32(0) + if user != "" { + if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil { + uid = uidForCopy + gid = gidForCopy + } + } + + destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} + opts := copier.MkdirOptions{ + ChmodNew: mode, + ChownNew: idPair, + UIDMap: destUIDMap, + GIDMap: destGIDMap, + } + return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts) + +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 789233405e2..0e45d12c2c8 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -270,7 +270,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { } return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) } - if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { // nolint:unconvert (required for some OS/arch combinations) + if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations) logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) continue } diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index f760d252767..2d81a8c5e35 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -13,6 +13,7 @@ import ( "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage" @@ -154,6 +155,10 @@ type Builder 
struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use when running processes in the container with non-host user namespaces. IDMappingOptions define.IDMappingOptions // Capabilities is a list of capabilities to use when running commands in the container. @@ -257,6 +262,8 @@ type BuilderOptions struct { // or "scratch" to indicate that the container should not be based on // an image. FromImage string + // ContainerSuffix is the suffix to add for generated container names + ContainerSuffix string // Container is a desired name for the build container. Container string // PullPolicy decides whether or not we should pull the image that @@ -271,6 +278,8 @@ type BuilderOptions struct { // to store copies of layer blobs that we pull down, if any. It should // already exist. BlobDirectory string + // Logger is the logrus logger to write log messages with + Logger *logrus.Logger `json:"-"` // Mount signals to NewBuilder() that the container should be mounted // immediately. Mount bool @@ -307,6 +316,10 @@ type BuilderOptions struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use if we're setting up our own user namespace. IDMappingOptions *define.IDMappingOptions // Capabilities is a list of capabilities to use when @@ -317,7 +330,7 @@ type BuilderOptions struct { Format string // Devices are the additional devices to add to the containers Devices define.ContainerDevices - //DefaultEnv for containers + // DefaultEnv is deprecated and ignored. DefaultEnv []string // MaxPullRetries is the maximum number of attempts we'll make to pull // any one image from the external registry if the first attempt fails. @@ -327,6 +340,10 @@ type BuilderOptions struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. 
OciDecryptConfig *encconfig.DecryptConfig + // ProcessLabel is the SELinux process label associated with the container + ProcessLabel string + // MountLabel is the SELinux mount label associated with the container + MountLabel string } // ImportOptions are used to initialize a Builder from an existing container @@ -396,6 +413,12 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) { if b.Type != containerType { return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type) } + + netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath) + if err != nil { + return nil, err + } + b.NetworkInterface = netInt b.store = store b.fixupConfig(nil) b.setupLogger() diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 8926b2e6f37..127c674bfff 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,6 +1,308 @@ -- Changelog for v1.23.1 (2021-09-27) - * Vendor containers/common v0.44.2 - * post-1.23 branch fixups +- Changelog for v1.26.1 (2022-05-04) + * Make `buildah build --label foo` create an empty "foo" label again + * Bump to v1.27.0-dev + +- Changelog for v1.26.0 (2022-05-04) + * imagebuildah,build: move deepcopy of args before we spawn goroutine + * Vendor in containers/storage v1.40.2 + * buildah.BuilderOptions.DefaultEnv is ignored, so mark it as deprecated + * help output: get more consistent about option usage text + * Handle OS version and features flags + * buildah build: --annotation and --label should remove values + * buildah build: add a --env + * buildah: deep copy options.Args before performing concurrent build/stage + * test: inline platform and builtinargs behaviour + * vendor: bump imagebuilder to master/009dbc6 + * build: automatically set correct TARGETPLATFORM where expected + * build(deps): bump github.com/fsouza/go-dockerclient + * Vendor in containers/(common, storage, image) + * imagebuildah, executor: process arg variables while populating baseMap + * buildkit: add support for custom build output with --output + * Cirrus: Update CI VMs to F36 + * fix staticcheck linter warning for deprecated function + * Fix docs build on FreeBSD + * build(deps): bump github.com/containernetworking/cni from 1.0.1 to 1.1.0 + * copier.unwrapError(): update for Go 1.16 + * copier.PutOptions: add StripSetuidBit/StripSetgidBit/StripStickyBit + * copier.Put(): write to read-only directories + * build(deps): bump github.com/cpuguy83/go-md2man/v2 in /tests/tools + * Rename $TESTSDIR (the plural one), step 4 of 3 + * Rename $TESTSDIR (the plural one), step 3 of 3 + * Rename $TESTSDIR (the plural one), step 2 of 3 + * Rename $TESTSDIR (the plural one), step 1 of 3 + * build(deps): bump github.com/containerd/containerd from 1.6.2 to 1.6.3 + * Ed's periodic test cleanup + * using consistent lowercase 'invalid' word in returned err msg + * Update vendor of containers/(common,storage,image) + * use etchosts package from c/common + * run: set actual hostname in /etc/hostname to match docker parity + * update c/common to latest main + * Update vendor of containers/(common,storage,image) + * Stop littering + * manifest-create: allow creating manifest list from local image + * Update vendor of storage,common,image + * Bump golang.org/x/crypto to 7b82a4e + * Initialize network backend before first pull + * oci spec: change special mount points for namespaces + * 
tests/helpers.bash: assert handle corner cases correctly + * buildah: actually use containers.conf settings + * integration tests: learn to start a dummy registry + * Fix error check to work on Podman + * buildah build should accept at most one arg + * tests: reduce concurrency for flaky bud-multiple-platform-no-run + * vendor in latest containers/common,image,storage + * manifest-add: allow override arch,variant while adding image + * Remove a stray `\` from .containerenv + * Vendor in latest opencontainers/selinux v1.10.1 + * build, commit: allow removing default identity labels + * Create shorter names for containers based on image IDs + * test: skip rootless on cgroupv2 in root env + * fix hang when oci runtime fails + * Set permissions for GitHub actions + * copier test: use correct UID/GID in test archives + * run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM + * Bump back to v1.26.0-dev + * build(deps): bump github.com/opencontainers/runc from 1.1.0 to 1.1.1 + * Included the URL to check the SHA + +- Changelog for v1.25.1 (2022-03-30) + * buildah: create WORKDIR with USER permissions + * vendor: update github.com/openshift/imagebuilder + * copier: attempt to open the dir before adding it + * Updated dependabot to get updates for GitHub actions. + * Switch most calls to filepath.Walk to filepath.WalkDir + * build: allow --no-cache and --layers so build cache can be overridden + * build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + * Bump to v1.26.0-dev + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + +- Changelog for v1.25.0 (2022-03-25) + * install: drop RHEL/CentOS 7 doc + * build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 + * Bump c/storage to v1.39.0 in main + * Add a test for CVE-2022-27651 + * build(deps): bump github.com/docker/docker + * Bump github.com/prometheus/client_golang to v1.11.1 + * [CI:DOCS] man pages: sort flags, and keep them that way + * build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2 + * Don't pollute + * network setup: increase timeout to 4 minutes + * do not set the inheritable capabilities + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3 + * parse: convert exposed GetVolumes to internal only + * buildkit: mount=type=cache support locking external cache store + * .in support: improve error message when cpp is not installed + * buildah image: install cpp + * build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 + * build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 + * build(deps): bump github.com/docker/docker + * Add --no-hosts flag to eliminate use of /etc/hosts within containers + * test: remove skips for rootless users + * test: unshare mount/umount if test is_rootless + * tests/copy: read correct containers.conf + * build(deps): bump github.com/docker/distribution + * cirrus: add separate task and matrix for rootless + * tests: skip tests for rootless which need unshare + * buildah: test rootless integration + * vendor: bump c/storage to main/93ce26691863 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10 + * tests/copy: initialize the network, too + * [CI:DOCS] remove references to Kubic for CentOS and Ubuntu + * build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1 + * use c/image/pkg/blobcache + * vendor c/image/v5@v5.20.0 + * add: ensure the context directory is an absolute path + * executor: docker
builds must inherit healthconfig from base if any + * docs: Remove Containerfile and containerignore + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9 + * helpers.bash: Use correct syntax + * speed up combination-namespaces test + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Bump back to 1.25.0-dev + * build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0 + +- Changelog for v1.24.2 (2022-02-16) + * Increase subuid/subgid to 65535 + * history: only add proxy vars to history if specified + * run_linux: use --systemd-cgroup + * buildah: new global option --cgroup-manager + * Makefile: build with systemd when available + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + * Bump c/common to v0.47.4 + * Cirrus: Use updated VM images + * conformance: add a few "replace-directory-with-symlink" tests + * Bump back to v1.25.0-dev + +- Changelog for v1.24.1 (2022-02-03) + * executor: Add support for inline --platform within Dockerfile + * caps: fix buildah run --cap-add=all + * Update vendor of openshift/imagebuilder + * Bump version of containers/image and containers/common + * Update vendor of containers/common + * System tests: fix accidental vandalism of source dir + * build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + * imagebuildah.BuildDockerfiles(): create the jobs semaphore + * build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + * overlay: always honor mountProgram + * overlay: move mount program invocation to separate function + * overlay: move mount program lookup to separate function + * Bump to v1.25.0-dev [NO TESTS NEEDED] + +- Changelog for v1.24.0 (2022-01-26) + * Update vendor of containers/common + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Github-workflow: Report both failures and errors.
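The "run: set parent-death signals and forward SIGHUP/SIGINT/SIGTERM" entry above corresponds to the chroot/run.go hunks later in this diff. Below is a condensed, Linux-only sketch of the two mechanisms involved: SysProcAttr.Pdeathsig, so the child is killed if its parent dies, plus a goroutine that relays signals to the child. The command and error handling are simplified for illustration:

```go
package main

import (
	"os"
	"os/exec"
	"os/signal"
	"syscall"
)

func main() {
	cmd := exec.Command("sleep", "60")
	// Pdeathsig is Linux-specific: the kernel delivers SIGKILL to the
	// child if this (parent) process exits first.
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL}

	interrupted := make(chan os.Signal, 100)
	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	go func() {
		for sig := range interrupted {
			// Relay whatever we received to the child process.
			_ = cmd.Process.Signal(sig)
		}
	}()
	_ = cmd.Wait()
	signal.Stop(interrupted)
	close(interrupted)
}
```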
+ * build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + * Update docs/buildah-build.1.md + * [CI:DOCS] Fix typos and improve language + * buildah bud --network add support for custom networks + * Make pull commands be consistent + * docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + * build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + * Vendor in latest containers/image + * Run codespell on code + * .github/dependabot.yml: add tests/tools go.mod + * CI: rm git-validation, add GHA job to validate PRs + * tests/tools: bump go-md2man to v2.0.1 + * tests/tools/Makefile: simplify + * tests/tools: bump onsi/ginkgo to v1.16.5 + * vendor: bump c/common and others + * mount: add support for custom upper and workdir with overlay mounts + * linux: fix lookup for runtime + * overlay: add MountWithOptions to API which extends support for advanced overlay + * Allow processing of SystemContext from FlagSet + * .golangci.yml: enable unparam linter + * util/resolveName: rm bool return + * tests/tools: bump golangci-lint + * .gitignore: fixups + * all: fix capabilities.NewPid deprecation warnings + * bind/mount.go: fix linter comment + * all: fix gosimple warning S1039 + * tests/e2e/buildah_suite_test.go: fix gosimple warnings + * imagebuildah/executor.go: fix gosimple warning + * util.go: fix gosimple warning + * build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + * Enable git-daemon tests + * Allow processing of id options from FlagSet + * Cirrus: Re-order tasks for more parallelism + * Cirrus: Freshen VM images + * Fix platform handling for empty os/arch values + * Allow processing of network options from FlagSet + * Fix permissions on secrets directory + * Update containers/image and containers/common + * bud.bats: use a local git daemon for the git protocol test + * Allow processing of common options from FlagSet + * Cirrus: Run int. tests in parallel with unit + * vendor c/common + * Fix default CNI paths + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + * multi-stage: enable mounting stages across each other with selinux enabled + * executor: Share selinux label of first stage with other stages in a build + * buildkit: add from field to bind and cache mounts so images can be used as source + * Use config.ProxyEnv from containers/common + * use libnetwork from c/common for networking + * setup the netns in the buildah parent process + * build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + * build: fix libsubid test + * Allow callers to replace the ContainerSuffix + * parse: allow parsing anomaly non-human value for memory control group + * .cirrus: remove static_build from ci + * stage_executor: re-use all possible layers from cache for squashed builds + * build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + * Allow rootless buildah to set resource limits on cgroup V2 + * build(deps): bump github.com/docker/docker + * tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + * build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + * Wire logger through to config + * copier.Put: check for is-not-a-directory using lstat, not stat + * Turn on rootless cgroupv2 tests + * Grab all of the containers.conf settings for namespaces. 
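The several "Allow processing of ... from FlagSet" entries above share one pattern: flags are registered on a pflag.FlagSet, and a helper later reads back whatever was set, keeping CLI wiring separate from option structs. A sketch assuming github.com/spf13/pflag (pulled in via cobra); the flag names and helper here are illustrative, not buildah's real API:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// networkOptions is a hypothetical options struct.
type networkOptions struct {
	network string
	noHosts bool
}

// networkFlags registers the flags; callers attach this to any FlagSet.
func networkFlags(fs *pflag.FlagSet) {
	fs.String("network", "", "network mode to use")
	fs.Bool("no-hosts", false, "do not create /etc/hosts")
}

// optionsFromFlagSet reads the parsed values back out of the FlagSet.
func optionsFromFlagSet(fs *pflag.FlagSet) (networkOptions, error) {
	var opts networkOptions
	var err error
	if opts.network, err = fs.GetString("network"); err != nil {
		return opts, err
	}
	if opts.noHosts, err = fs.GetBool("no-hosts"); err != nil {
		return opts, err
	}
	return opts, nil
}

func main() {
	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
	networkFlags(fs)
	_ = fs.Parse([]string{"--network", "none", "--no-hosts"})
	opts, _ := optionsFromFlagSet(fs)
	fmt.Printf("%+v\n", opts)
}
```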
+ * image: set MediaType in OCI manifests + * copier: RemoveAll possibly-directories + * Simple README fix + * images: accept multiple filter with logical AND + * build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + * Update vendor of container/storage + * build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + * build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + * Make LocalIP public function so Podman can use it + * Fix UnsetEnv for buildah bud + * Tests should rely only on static/unchanging images + * run: ensure that stdio pipes are labeled correctly + * build(deps): bump github.com/docker/docker + * Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + * chroot: don't use the generated default seccomp filter for unit tests + * build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + * ssh-agent: Increase timeout before we explicitly close connection + * docs/tutorials: update + * Clarify that manifest defaults to localhost as the registry name + * "config": remove a stray bit of debug output + * "commit": fix a flag typo + * Fix an error message: unlocking vs locking + * Expand the godoc for CommonBuildOptions.Secrets + * chroot: accept an "rw" option + * Add --unsetenv option to buildah commit and build + * define.TempDirForURL(): show CombinedOutput when a command fails + * config: support the variant field + * rootless: do not bind mount /sys if not needed + * Fix tutorial to specify command on buildah run line + * build: history should not contain ARG values + * docs: Use guaranteed path for go-md2man + * run: honor --network=none from builder if nothing specified + * networkpolicy: Should be enabled instead of default when explicitly set + * Add support for env var secret sources + * build(deps): bump github.com/docker/docker + * fix: another non-portable shebang + * Rootless containers users should use additional groups + * Support overlayfs path contains colon + * Report ignorefile location when no content added + * Add support for host.containers.internal in the /etc/hosts + * build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + * imagebuildah: fix nil deref + * buildkit: add support for mount=type=cache + * Default secret mode to 400 + * [CI:DOCS] Include manifest example usage + * docs: update buildah-from, buildah-pull 'platform' option compatibility notes + * docs: update buildah-build 'platform' option compatibility notes + * De-dockerize the man page as much as possible + * [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + * docs: Fix and Update Containerfile man page with supported mount types + * mount: add tmpcopyup to tmpfs mount option + * buildkit: Add support for --mount=type=tmpfs + * build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + * Fix command doc links in README.md + * build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + * build: Add support for buildkit like --mount=type=bind + * Bump containerd to v1.5.7 + * build(deps): bump github.com/docker/docker + * tests: stop pulling php, composer + * Fix .containerignore link file + * Cirrus: Fix defunct package metadata breaking cache + * build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + * buildah build: add --all-platforms + * Add man page for Containerfile and .containerignore + * Plumb the remote logger throughout Buildah + * Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + * Run: Cleanup run directory after every RUN step + * build(deps): bump
github.com/containers/common from 0.45.0 to 0.46.0 + * Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + * Makefile: check for `-race` using `-mod=vendor` + * imagebuildah: fix an attempt to write to a nil map + * push: support to specify the compression format + * conformance: allow test cases to specify dockerUseBuildKit + * build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + * build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + * unmarshalConvertedConfig(): handle zstd compression + * tests/copy/copy: wire up compression options + * Update to github.com/vbauerster/mpb v7.1.5 + * Add flouthoc to OWNERS + * build: Add additional step nodes when labels are modified + * Makefile: turn on race detection whenever it's available + * conformance: add more tests for exclusion short-circuiting + * Update VM Images + Drop prior-ubuntu testing + * Bump to v1.24.0-dev - Changelog for v1.23.0 (2021-09-13) * Vendor in containers/common v0.44.0 diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go index e6f28e81a1c..9ff7c933d08 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -10,6 +10,7 @@ import ( "io/ioutil" "os" "os/exec" + "os/signal" "path/filepath" "runtime" "strconv" @@ -159,10 +160,24 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade // Start the grandparent subprocess. cmd := unshare.Command(runUsingChrootCommand) + setPdeathsig(cmd.Cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + interrupted := make(chan os.Signal, 100) + cmd.Hook = func(int) error { + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() + return nil + } + logrus.Debugf("Running %#v in %#v", cmd.Cmd, cmd) confwg.Add(1) go func() { @@ -173,6 +188,8 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) err = cmd.Run() confwg.Wait() + signal.Stop(interrupted) + close(interrupted) if err == nil { return conferr } @@ -238,7 +255,7 @@ func runUsingChrootMain() { // Set the kernel's lock to "unlocked". locked := 0 if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 { - logrus.Errorf("error locking PTY descriptor: %v", err) + logrus.Errorf("error unlocking PTY descriptor: %v", err) os.Exit(1) } // Get a handle for the other end. @@ -571,6 +588,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io // Start the parent subprocess. cmd := unshare.Command(append([]string{runUsingChrootExecCommand}, spec.Process.Args...)...) + setPdeathsig(cmd.Cmd) cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} @@ -593,10 +611,19 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io } cmd.OOMScoreAdj = spec.Process.OOMScoreAdj cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
+ interrupted := make(chan os.Signal, 100) cmd.Hook = func(int) error { for _, f := range closeOnceRunning { f.Close() } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + go func() { + for receivedSignal := range interrupted { + if err := cmd.Process.Signal(receivedSignal); err != nil { + logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal) + } + } + }() return nil } @@ -609,6 +636,8 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io }() err = cmd.Run() confwg.Wait() + signal.Stop(interrupted) + close(interrupted) if err != nil { if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { @@ -792,11 +821,27 @@ func runUsingChrootExecMain() { // Actually run the specified command. cmd := exec.Command(args[0], args[1:]...) + setPdeathsig(cmd) cmd.Env = options.Spec.Process.Env cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr cmd.Dir = cwd logrus.Debugf("Running %#v (PATH = %q)", cmd, os.Getenv("PATH")) - if err = cmd.Run(); err != nil { + interrupted := make(chan os.Signal, 100) + if err = cmd.Start(); err != nil { + fmt.Fprintf(os.Stderr, "process failed to start with error: %v", err) + } + go func() { + for range interrupted { + if err := cmd.Process.Signal(syscall.SIGKILL); err != nil { + logrus.Infof("%v while attempting to send SIGKILL to child process", err) + } + } + }() + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + err = cmd.Wait() + signal.Stop(interrupted) + close(interrupted) + if err != nil { if exitError, ok := err.(*exec.ExitError); ok { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { @@ -883,46 +928,49 @@ func setApparmorProfile(spec *specs.Spec) error { // setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. 
func setCapabilities(spec *specs.Spec, keepCaps ...string) error { - currentCaps, err := capability.NewPid(0) + currentCaps, err := capability.NewPid2(0) if err != nil { return errors.Wrapf(err, "error reading capabilities of current process") } - caps, err := capability.NewPid(0) + if err := currentCaps.Load(); err != nil { + return errors.Wrapf(err, "error loading capabilities") + } + caps, err := capability.NewPid2(0) if err != nil { return errors.Wrapf(err, "error reading capabilities of current process") } capMap := map[capability.CapType][]string{ capability.BOUNDING: spec.Process.Capabilities.Bounding, capability.EFFECTIVE: spec.Process.Capabilities.Effective, - capability.INHERITABLE: spec.Process.Capabilities.Inheritable, + capability.INHERITABLE: []string{}, capability.PERMITTED: spec.Process.Capabilities.Permitted, capability.AMBIENT: spec.Process.Capabilities.Ambient, } knownCaps := capability.List() - caps.Clear(capability.CAPS | capability.BOUNDS | capability.AMBS) + noCap := capability.Cap(-1) for capType, capList := range capMap { for _, capToSet := range capList { - cap := capability.CAP_LAST_CAP + cap := noCap for _, c := range knownCaps { if strings.EqualFold("CAP_"+c.String(), capToSet) { cap = c break } } - if cap == capability.CAP_LAST_CAP { + if cap == noCap { return errors.Errorf("error mapping capability %q to a number", capToSet) } caps.Set(capType, cap) } for _, capToSet := range keepCaps { - cap := capability.CAP_LAST_CAP + cap := noCap for _, c := range knownCaps { if strings.EqualFold("CAP_"+c.String(), capToSet) { cap = c break } } - if cap == capability.CAP_LAST_CAP { + if cap == noCap { return errors.Errorf("error mapping capability %q to a number", capToSet) } if currentCaps.Get(capType, cap) { @@ -1191,21 +1239,33 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } requestFlags := bindFlags expectedFlags := uintptr(0) - if util.StringInSlice("nodev", m.Options) { - requestFlags |= unix.MS_NODEV - expectedFlags |= unix.ST_NODEV - } - if util.StringInSlice("noexec", m.Options) { - requestFlags |= unix.MS_NOEXEC - expectedFlags |= unix.ST_NOEXEC - } - if util.StringInSlice("nosuid", m.Options) { - requestFlags |= unix.MS_NOSUID - expectedFlags |= unix.ST_NOSUID - } - if util.StringInSlice("ro", m.Options) { - requestFlags |= unix.MS_RDONLY - expectedFlags |= unix.ST_RDONLY + for _, option := range m.Options { + switch option { + case "nodev": + requestFlags |= unix.MS_NODEV + expectedFlags |= unix.ST_NODEV + case "dev": + requestFlags &= ^uintptr(unix.MS_NODEV) + expectedFlags &= ^uintptr(unix.ST_NODEV) + case "noexec": + requestFlags |= unix.MS_NOEXEC + expectedFlags |= unix.ST_NOEXEC + case "exec": + requestFlags &= ^uintptr(unix.MS_NOEXEC) + expectedFlags &= ^uintptr(unix.ST_NOEXEC) + case "nosuid": + requestFlags |= unix.MS_NOSUID + expectedFlags |= unix.ST_NOSUID + case "suid": + requestFlags &= ^uintptr(unix.MS_NOSUID) + expectedFlags &= ^uintptr(unix.ST_NOSUID) + case "ro": + requestFlags |= unix.MS_RDONLY + expectedFlags |= unix.ST_RDONLY + case "rw": + requestFlags &= ^uintptr(unix.MS_RDONLY) + expectedFlags &= ^uintptr(unix.ST_RDONLY) + } } switch m.Type { case "bind": @@ -1404,3 +1464,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } return undoBinds, nil } + +// setPdeathsig sets a parent-death signal for the process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL 
+} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go index 1ca0a159ebc..f130f7a22fd 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -3,6 +3,9 @@ package chroot import ( + "io/ioutil" + + "github.com/containers/common/pkg/seccomp" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" libseccomp "github.com/seccomp/libseccomp-golang" @@ -171,3 +174,27 @@ func setSeccomp(spec *specs.Spec) error { } return nil } + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + switch seccompProfilePath { + case "unconfined": + spec.Linux.Seccomp = nil + case "": + seccompConfig, err := seccomp.GetDefaultProfile(spec) + if err != nil { + return errors.Wrapf(err, "loading default seccomp profile failed") + } + spec.Linux.Seccomp = seccompConfig + default: + seccompProfile, err := ioutil.ReadFile(seccompProfilePath) + if err != nil { + return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + } + seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) + if err != nil { + return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + } + spec.Linux.Seccomp = seccompConfig + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go index a5b74bf092c..f33dd254a35 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -13,3 +13,11 @@ func setSeccomp(spec *specs.Spec) error { } return nil } + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + if spec.Linux != nil { + // runtime-tools may have supplied us with a default filter + spec.Linux.Seccomp = nil + } + return nil +} diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index bbf1727fbd5..ca597e22230 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -101,6 +101,9 @@ type CommitOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + // UnsetEnvs is a list of environments to not add to final image. + // Deprecated: use UnsetEnv() before committing instead. + UnsetEnvs []string } var ( @@ -227,6 +230,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options CommitOptions) (string, reference.Canonical, digest.Digest, error) { var ( imgID string + src types.ImageReference ) // If we weren't given a name, build a destination reference using a @@ -298,7 +302,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest)) // Build an image reference from which we can copy the finished image. 
- src, err := b.makeImageRef(options) + src, err = b.makeContainerImageRef(options) if err != nil { return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) } diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index a3da64e6bed..e009ed7631d 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -8,9 +8,12 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + "github.com/containers/buildah/util" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/stringid" @@ -28,18 +31,24 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference())) } if wantedManifestMIMEType != actualManifestMIMEType { + layerInfos := img.LayerInfos() + for i := range layerInfos { // force the "compression" to gzip, which is supported by all of the formats we care about + layerInfos[i].CompressionOperation = types.Compress + layerInfos[i].CompressionAlgorithm = &compression.Gzip + } updatedImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{ + LayerInfos: layerInfos, + }) + if err != nil { + return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference())) + } + secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{ ManifestMIMEType: wantedManifestMIMEType, - InformationOnly: types.ManifestUpdateInformation{ // Strictly speaking, every value in here is invalid. But… - Destination: nil, // Destination is technically required, but actually necessary only for conversion _to_ v2s1. Leave it nil, we will crash if that ever changes. - LayerInfos: nil, // LayerInfos is necessary for size information in v2s2/OCI manifests, but the code can work with nil, and we are not reading the converted manifest at all. - LayerDiffIDs: nil, // LayerDiffIDs are actually embedded in the converted manifest, but the code can work with nil, and the values are not needed until pushing the finished image, at which time containerImageRef.NewImageSource builds the values from scratch. 
- }, }) if err != nil { return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType) } - img = updatedImg + img = secondUpdatedImg } config, err := img.ConfigBlob(ctx) if err != nil { @@ -126,6 +135,10 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) { } else { b.SetArchitecture(runtime.GOARCH) } + // in case the arch string we started with was shorthand for a known arch+variant pair, normalize it + ps := platforms.Normalize(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()}) + b.SetArchitecture(ps.Architecture) + b.SetVariant(ps.Variant) } if b.Format == define.Dockerv2ImageManifest && b.Hostname() == "" { b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID())) @@ -190,6 +203,69 @@ func (b *Builder) SetOS(os string) { b.Docker.OS = os } +// OSVersion returns a version of the OS on which the container, or a container +// built using an image built from this container, is intended to be run. +func (b *Builder) OSVersion() string { + return b.OCIv1.OSVersion +} + +// SetOSVersion sets the version of the OS on which the container, or a +// container built using an image built from this container, is intended to be +// run. +func (b *Builder) SetOSVersion(version string) { + b.OCIv1.OSVersion = version + b.Docker.OSVersion = version +} + +// OSFeatures returns a list of OS features which the container, or a container +// built using an image built from this container, depends on the OS supplying. +func (b *Builder) OSFeatures() []string { + return copyStringSlice(b.OCIv1.OSFeatures) +} + +// SetOSFeature adds a feature of the OS which the container, or a container +// built using an image built from this container, depends on the OS supplying. +func (b *Builder) SetOSFeature(feature string) { + if !util.StringInSlice(feature, b.OCIv1.OSFeatures) { + b.OCIv1.OSFeatures = append(b.OCIv1.OSFeatures, feature) + } + if !util.StringInSlice(feature, b.Docker.OSFeatures) { + b.Docker.OSFeatures = append(b.Docker.OSFeatures, feature) + } +} + +// UnsetOSFeature removes a feature of the OS which the container, or a +// container built using an image built from this container, depends on the OS +// supplying. +func (b *Builder) UnsetOSFeature(feature string) { + if util.StringInSlice(feature, b.OCIv1.OSFeatures) { + features := make([]string, 0, len(b.OCIv1.OSFeatures)) + for _, f := range b.OCIv1.OSFeatures { + if f != feature { + features = append(features, f) + } + } + b.OCIv1.OSFeatures = features + } + if util.StringInSlice(feature, b.Docker.OSFeatures) { + features := make([]string, 0, len(b.Docker.OSFeatures)) + for _, f := range b.Docker.OSFeatures { + if f != feature { + features = append(features, f) + } + } + b.Docker.OSFeatures = features + } +} + +// ClearOSFeatures clears the list of features of the OS which the container, +// or a container built using an image built from this container, depends on +// the OS supplying. +func (b *Builder) ClearOSFeatures() { + b.OCIv1.OSFeatures = []string{} + b.Docker.OSFeatures = []string{} +} + // Architecture returns a name of the architecture on which the container, or a // container built using an image built from this container, is intended to be // run. 
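The fixupConfig hunk above now runs the OS/arch/variant triple through containerd's platforms.Normalize, so shorthand architecture strings recorded in an image config resolve to a canonical arch+variant pair. A small demonstration; the exact mappings belong to containerd's normalization table, and the outputs noted in the comments reflect its usual behavior rather than a guarantee:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	for _, arch := range []string{"armhf", "aarch64", "x86_64"} {
		p := platforms.Normalize(ociv1.Platform{OS: "linux", Architecture: arch})
		// Typically: armhf -> linux/arm variant "v7",
		//            aarch64 -> linux/arm64 variant "",
		//            x86_64 -> linux/amd64 variant "".
		fmt.Printf("%s -> %s/%s variant=%q\n", arch, p.OS, p.Architecture, p.Variant)
	}
}
```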
@@ -205,6 +281,21 @@ func (b *Builder) SetArchitecture(arch string) { b.Docker.Architecture = arch } +// Variant returns a name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. +func (b *Builder) Variant() string { + return b.OCIv1.Variant +} + +// SetVariant sets the name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. +func (b *Builder) SetVariant(variant string) { + b.Docker.Variant = variant + b.OCIv1.Variant = variant +} + // Maintainer returns contact information for the person who built the image. func (b *Builder) Maintainer() string { return b.OCIv1.Author @@ -247,7 +338,7 @@ func (b *Builder) ClearOnBuild() { // discarded when writing images using OCIv1 formats. func (b *Builder) SetOnBuild(onBuild string) { if onBuild != "" && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) + b.Logger.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) } b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild) } @@ -279,7 +370,7 @@ func (b *Builder) Shell() []string { // discarded when writing images using OCIv1 formats. func (b *Builder) SetShell(shell []string) { if len(shell) > 0 && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell) + b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell) } b.Docker.Config.Shell = copyStringSlice(shell) @@ -516,7 +607,7 @@ func (b *Builder) Domainname() string { // discarded when writing images using OCIv1 formats. func (b *Builder) SetDomainname(name string) { if name != "" && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) + b.Logger.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) } b.Docker.Config.Domainname = name } @@ -593,7 +684,7 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) { b.Docker.Config.Healthcheck = nil if config != nil { if b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("Healthcheck is not supported for OCI image format and will be ignored. Must use `docker` format") + b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. 
Must use `docker` format") } b.Docker.Config.Healthcheck = &docker.HealthConfig{ Test: copyStringSlice(config.Test), diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 1823e523840..00aa29ccc44 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -4,8 +4,10 @@ import ( "archive/tar" "bytes" "encoding/json" + stderrors "errors" "fmt" "io" + "io/fs" "io/ioutil" "net" "os" @@ -343,6 +345,9 @@ type PutOptions struct { ChmodDirs *os.FileMode // set permissions on newly-created directories ChownFiles *idtools.IDPair // set ownership of newly-created files ChmodFiles *os.FileMode // set permissions on newly-created files + StripSetuidBit bool // strip the setuid bit off of items being written + StripSetgidBit bool // strip the setgid bit off of items being written + StripStickyBit bool // strip the sticky bit off of items being written StripXattrs bool // don't bother trying to set extended attributes of items being copied IgnoreXattrErrors bool // ignore any errors encountered when attempting to set extended attributes IgnoreDevices bool // ignore items which are character or block devices @@ -1179,10 +1184,10 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa // we don't expand any of the contents that are archives options := req.GetOptions options.ExpandArchives = false - walkfn := func(path string, info os.FileInfo, err error) error { + walkfn := func(path string, d fs.DirEntry, err error) error { if err != nil { if options.IgnoreUnreadable && errorIsPermission(err) { - if info != nil && info.IsDir() { + if d != nil && d.IsDir() { return filepath.SkipDir } return nil @@ -1192,8 +1197,8 @@ } return errors.Wrapf(err, "copier: get: error reading %q", path) } - if info.Mode()&os.ModeType == os.ModeSocket { - logrus.Warningf("copier: skipping socket %q", info.Name()) + if d.Type() == os.ModeSocket { + logrus.Warningf("copier: skipping socket %q", d.Name()) return nil } // compute the path of this item @@ -1216,7 +1221,7 @@ return err } if skip { - if info.IsDir() { + if d.IsDir() { // if there are no "include // this anyway" patterns at // all, we don't need to @@ -1254,17 +1259,21 @@ } // if it's a symlink, read its target symlinkTarget := "" - if info.Mode()&os.ModeType == os.ModeSymlink { + if d.Type() == os.ModeSymlink { target, err := os.Readlink(path) if err != nil { return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path) } symlinkTarget = target } + info, err := d.Info() + if err != nil { + return err + } // if it's a directory and we're staying on one device, and it's on a // different device than the one we started from, skip its contents var ok error - if info.Mode().IsDir() && req.GetOptions.NoCrossDevice { + if d.IsDir() && req.GetOptions.NoCrossDevice { if !sameDevice(topInfo, info) { ok = filepath.SkipDir } @@ -1282,7 +1291,7 @@ return ok } // walk the directory tree, checking/adding items individually - if err := filepath.Walk(item, walkfn); err != nil { + if err := filepath.WalkDir(item, walkfn); err != nil { return errors.Wrapf(err, "copier: get: %q(%q)",
queue[i], item) } itemsCopied++ @@ -1461,6 +1470,13 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str return errors.Wrapf(err, "error opening file for adding its contents to archive") } defer f.Close() + } else if hdr.Typeflag == tar.TypeDir { + // open the directory file first to make sure we can access it. + f, err = os.Open(contentPath) + if err != nil { + return errors.Wrapf(err, "error opening directory for adding its contents to archive") + } + defer f.Close() } // output the header if err = tw.WriteHeader(hdr); err != nil { @@ -1520,6 +1536,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID } } + directoryModes := make(map[string]os.FileMode) ensureDirectoryUnderRoot := func(directory string) error { rel, err := convertToRelSubdirectory(req.Root, directory) if err != nil { @@ -1533,8 +1550,10 @@ if err = lchown(path, defaultDirUID, defaultDirGID); err != nil { return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID) } - if err = os.Chmod(path, defaultDirMode); err != nil { - return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, defaultDirMode) + // make a conditional note to set this directory's permissions + // later, but not if we already had an explicitly-provided mode + if _, ok := directoryModes[path]; !ok { + directoryModes[path] = defaultDirMode } } else { if !os.IsExist(err) { @@ -1544,6 +1563,20 @@ } return nil } + makeDirectoryWriteable := func(directory string) error { + st, err := os.Lstat(directory) + if err != nil { + return errors.Wrapf(err, "copier: put: error reading permissions of directory %q", directory) + } + mode := st.Mode() & os.ModePerm + if _, ok := directoryModes[directory]; !ok { + directoryModes[directory] = mode + } + if err = os.Chmod(directory, 0o700); err != nil { + return errors.Wrapf(err, "copier: put: error making directory %q writable", directory) + } + return nil + } createFile := func(path string, tr *tar.Reader) (int64, error) { f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) if err != nil && os.IsExist(err) { @@ -1553,7 +1586,21 @@ } } if err = os.RemoveAll(path); err != nil { - return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path) + if os.IsPermission(err) { + if err := makeDirectoryWriteable(filepath.Dir(path)); err != nil { + return 0, err + } + err = os.RemoveAll(path) + } + if err != nil { + return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path) + } + } + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) + } + if err != nil && os.IsPermission(err) { + if err = makeDirectoryWriteable(filepath.Dir(path)); err != nil { + return 0, err } f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600) } @@ -1597,6 +1644,11 @@ logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", directoryAndTimes.directory, directoryAndTimes.atime, directoryAndTimes.mtime, err) } } + for directory, mode := range directoryModes { + if err := os.Chmod(directory,
mode); err != nil { + logrus.Debugf("error setting permissions of %q to 0%o: %v", directory, uint32(mode), err) + } + } }() ignoredItems := make(map[string]struct{}) tr := tar.NewReader(bulkReader) @@ -1637,6 +1689,15 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return err } // figure out what the permissions should be + if req.PutOptions.StripSetuidBit && hdr.Mode&cISUID == cISUID { + hdr.Mode &^= cISUID + } + if req.PutOptions.StripSetgidBit && hdr.Mode&cISGID == cISGID { + hdr.Mode &^= cISGID + } + if req.PutOptions.StripStickyBit && hdr.Mode&cISVTX == cISVTX { + hdr.Mode &^= cISVTX + } if hdr.Typeflag == tar.TypeDir { if req.PutOptions.ChmodDirs != nil { hdr.Mode = int64(*req.PutOptions.ChmodDirs) @@ -1660,7 +1721,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // only check the length if there wasn't an error, which we'll // check along with errors for other types of entries if err == nil && written != hdr.Size { - return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size) + return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size) } case tar.TypeLink: var linkTarget string @@ -1681,7 +1742,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = os.Link(linkTarget, path) } } @@ -1696,7 +1757,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)) } } @@ -1711,7 +1772,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))) } } @@ -1726,14 +1787,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))) } } case tar.TypeDir: if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) { var st os.FileInfo - if st, err = os.Stat(path); err == nil && !st.IsDir() { + if st, err = os.Lstat(path); err == nil && !st.IsDir() { // it's not a directory, so remove it and mkdir if err = os.Remove(path); err == nil { err = os.Mkdir(path, 0700) @@ -1751,6 +1812,9 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM atime: hdr.AccessTime, mtime: hdr.ModTime, }) + // set the mode here unconditionally, in case the directory is in + // the archive more than once for whatever reason + directoryModes[path] = mode case tar.TypeFifo: if err = mkfifo(path, 0600); err != nil && os.IsExist(err) { if req.PutOptions.NoOverwriteDirNonDir { @@ -1758,7 +1822,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mkfifo(path, 0600) } } @@ -1778,8 +1842,12 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM if err = lchown(path, hdr.Uid, hdr.Gid); err != nil { return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", 
path, hdr.Uid, hdr.Gid) } - // set permissions, except for symlinks, since we don't have lchmod - if hdr.Typeflag != tar.TypeSymlink { + // set permissions, except for symlinks, since we don't + // have an lchmod, and directories, which we'll fix up + // on our way out so that we don't get tripped up by + // directories which we're not supposed to be able to + // write to, but which we'll need to create content in + if hdr.Typeflag != tar.TypeSymlink && hdr.Typeflag != tar.TypeDir { if err = os.Chmod(path, mode); err != nil { return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode) } @@ -1895,3 +1963,15 @@ func copierHandlerRemove(req request) *response { } return &response{Error: "", Remove: removeResponse{}} } + +func unwrapError(err error) error { + e := errors.Cause(err) + for e != nil { + err = e + e = stderrors.Unwrap(err) + if e == err { + break + } + } + return err +} diff --git a/vendor/github.com/containers/buildah/copier/unwrap_112.go b/vendor/github.com/containers/buildah/copier/unwrap_112.go deleted file mode 100644 index ebbad08b991..00000000000 --- a/vendor/github.com/containers/buildah/copier/unwrap_112.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go113 - -package copier - -import ( - "github.com/pkg/errors" -) - -func unwrapError(err error) error { - return errors.Cause(err) -} diff --git a/vendor/github.com/containers/buildah/copier/unwrap_113.go b/vendor/github.com/containers/buildah/copier/unwrap_113.go deleted file mode 100644 index cd0d0fb6871..00000000000 --- a/vendor/github.com/containers/buildah/copier/unwrap_113.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go113 - -package copier - -import ( - stderror "errors" - - "github.com/pkg/errors" -) - -func unwrapError(err error) error { - e := errors.Cause(err) - for e != nil { - err = e - e = errors.Unwrap(err) - } - return err -} diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go index c757adcc887..aef0f440381 100644 --- a/vendor/github.com/containers/buildah/copier/xattrs.go +++ b/vendor/github.com/containers/buildah/copier/xattrs.go @@ -16,7 +16,9 @@ const ( ) var ( - relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others + relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others + initialXattrListSize = 64 * 1024 + initialXattrValueSize = 64 * 1024 ) // isRelevantXattr checks if "attribute" matches one of the attribute patterns @@ -35,7 +37,7 @@ func isRelevantXattr(attribute string) bool { // Lgetxattrs returns a map of the relevant extended attributes set on the given file. 
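A side note on the unwrapError consolidation above: the two build-tag-gated variants are replaced by a single helper that understands both error-wrapping styles. A self-contained sketch of why the loop alternates errors.Cause with the standard library's Unwrap; the helper is reproduced here for the demo, and the wrapped errors are made up:

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

// unwrapError mirrors the new helper: errors.Cause peels pkg/errors
// wrappers, stderrors.Unwrap peels fmt.Errorf("%w", ...) wrappers, and
// the e == err check stops the loop once an error no longer unwraps to
// something new.
func unwrapError(err error) error {
	e := errors.Cause(err)
	for e != nil {
		err = e
		e = stderrors.Unwrap(err)
		if e == err {
			break
		}
	}
	return err
}

func main() {
	root := stderrors.New("permission denied")
	// mix both wrapping styles, as copier's callers do
	err := fmt.Errorf("opening file: %w", errors.Wrap(root, "copier: get"))
	fmt.Println(unwrapError(err) == root) // true
}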
func Lgetxattrs(path string) (map[string]string, error) { maxSize := 64 * 1024 * 1024 - listSize := 64 * 1024 + listSize := initialXattrListSize var list []byte for listSize < maxSize { list = make([]byte, listSize) @@ -61,7 +63,7 @@ func Lgetxattrs(path string) (map[string]string, error) { m := make(map[string]string) for _, attribute := range strings.Split(string(list), string('\000')) { if isRelevantXattr(attribute) { - attributeSize := 64 * 1024 + attributeSize := initialXattrValueSize var attributeValue []byte for attributeSize < maxSize { attributeValue = make([]byte, attributeSize) diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index cff9a3d830b..568be203cf6 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -4,6 +4,7 @@ import ( "io" "time" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage/pkg/archive" @@ -28,6 +29,8 @@ type CommonBuildOptions struct { CPUSetMems string // HTTPProxy determines whether *_proxy env vars from the build host are passed into the container. HTTPProxy bool + // IdentityLabel, if set, ensures that the default `io.buildah.version` label is not applied to the built image. + IdentityLabel types.OptionalBool // Memory is the upper limit (in bytes) on how much memory running containers can use. Memory int64 // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf @@ -36,11 +39,14 @@ DNSServers []string // DNSOptions is the list of DNS options to add to the build container's /etc/resolv.conf DNSOptions []string - // MemorySwap limits the amount of memory and swap together. - MemorySwap int64 // LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable". // Recognized field names are "role", "type", and "level". LabelOpts []string + // MemorySwap limits the amount of memory and swap together. + MemorySwap int64 + // NoHosts tells the builder not to create /etc/hosts content when running + // containers. + NoHosts bool // OmitTimestamp forces epoch 0 as created timestamp to allow for // deterministic, content-addressable builds. OmitTimestamp bool @@ -70,7 +76,9 @@ Ulimit []string // Volumes to bind mount into the container Volumes []string - // Secrets are the available secrets to use in a build + // Secrets are the available secrets to use in a build. Each item in the + // slice takes the form "id=foo,src=bar", where both "id" and "src" are + // required, in that order, and "bar" is the name of a file. Secrets []string // SSHSources is the available ssh agent connections to forward in the build SSHSources []string @@ -78,6 +86,8 @@ // BuildOptions can be used to alter how an image is built. type BuildOptions struct { + // ContainerSuffix is the name to suffix containers with + ContainerSuffix string // ContextDirectory is the default source location for COPY and ADD // commands. ContextDirectory string @@ -113,6 +123,10 @@ Args map[string]string // Name of the image to write to. Output string + // BuildOutput specifies whether a custom build output was selected for the following build. + // It allows the end user to export the recently built rootfs into a directory or a tar archive. + // See the documentation of 'buildah build --output' for the details of the format.
+ BuildOutput string // Additional tags to add to the image that we write, if we know of a // way to add them. AdditionalTags []string @@ -157,6 +171,10 @@ type BuildOptions struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use if we're setting up our own user namespace // when handling RUN instructions. IDMappingOptions *IDMappingOptions @@ -227,6 +245,8 @@ type BuildOptions struct { RusageLogFile string // Excludes is a list of excludes to be used instead of the .dockerignore file. Excludes []string + // IgnoreFile is the name of the .containerignore file + IgnoreFile string // From is the image name to use to replace the value specified in the first // FROM instruction in the Containerfile From string @@ -234,4 +254,20 @@ // to build the image for. If this slice has items in it, the OS and // Architecture fields above are ignored. Platforms []struct{ OS, Arch, Variant string } + // AllPlatforms tells the builder to set the list of target platforms + // to match the set of platforms for which all of the build's base + // images are available. If this field is set, Platforms is ignored. + AllPlatforms bool + // UnsetEnvs is a list of environment variables not to add to the final image. + UnsetEnvs []string + // Envs is a list of environment variables to set in the final image. + Envs []string + // OSFeatures specifies operating system features the image requires. + // It is typically only set when the OS is "windows". + OSFeatures []string + // OSVersion specifies the exact operating system version the image + // requires. It is typically only set when the OS is "windows". Any + // value set in a base image will be preserved, so this does not + // frequently need to be set. + OSVersion string } diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 4f3ebf01a49..459a161cdd6 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,15 +29,11 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.23.1" + Version = "1.26.1" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" - DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin" - // DefaultCNIConfigDir is the default location of CNI configuration files. - DefaultCNIConfigDir = "/etc/cni/net.d" - // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, // suitable for specifying as a value of the PreferredManifestType // member of a CommitOptions structure. It is also the default. @@ -93,6 +89,20 @@ type IDMappingOptions struct { GIDMap []specs.LinuxIDMapping } +// Secret is a secret source that can be used in a RUN instruction +type Secret struct { + ID string + Source string + SourceType string +} + +// BuildOutputOption contains the outcome of parsing the value of a build --output flag +type BuildOutputOption struct { + Path string // Only valid if !IsStdout + IsDir bool + IsStdout bool +} + // TempDirForURL checks if the passed-in string looks like a URL or -.
If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along @@ -117,12 +127,12 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.Wrapf(err, "error parsing url %q", url) } if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") { - err = cloneToDirectory(url, name) + combinedOutput, err := cloneToDirectory(url, name) if err != nil { if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } - return "", "", err + return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput)) } return name, "", nil } @@ -160,7 +170,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.Errorf("unreachable code reached") } -func cloneToDirectory(url, dir string) error { +func cloneToDirectory(url, dir string) ([]byte, error) { gitBranch := strings.Split(url, "#") var cmd *exec.Cmd if len(gitBranch) < 2 { @@ -170,7 +180,7 @@ logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir) } - return cmd.Run() + return cmd.CombinedOutput() } func downloadToDirectory(url, dir string) error { diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 561287ac276..b0ed2e4c021 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -151,6 +151,8 @@ type V1Image struct { Config *Config `json:"config,omitempty"` // Architecture is the hardware that the image is built and runs on Architecture string `json:"architecture,omitempty"` + // Variant is a variant of the CPU that the image is built and runs on + Variant string `json:"variant,omitempty"` // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index ef05f37d26e..e3668bd0ddf 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -43,6 +43,15 @@ const ( Dockerv2ImageManifest = define.Dockerv2ImageManifest ) +// ExtractRootfsOptions is consumed by ExtractRootfs(), and allows +// callers to control whether modes like the setuid and setgid bits and xattrs +// are preserved on the extracted file system objects. +type ExtractRootfsOptions struct { + StripSetuidBit bool // strip the setuid bit off of items being extracted. + StripSetgidBit bool // strip the setgid bit off of items being extracted. + StripXattrs bool // don't record extended attributes of items being extracted. +} + type containerImageRef struct { fromImageName string fromImageID string @@ -150,7 +159,10 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om } // Extract the container's whole filesystem as if it were a single layer. -func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { +// Takes ExtractRootfsOptions as an argument, which allows the caller to control +// whether the setuid, setgid, and sticky bits and extended attributes are preserved +// on the extracted rootfs.
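A usage sketch for the new options type: the Builder.ExtractRootfs method added further down in this file streams the rootfs as a tar archive, here with the setuid and setgid bits stripped. The package and function names in this sketch are placeholders, not code from the patch:

package export

import (
	"io"

	"github.com/containers/buildah"
)

// ExportRootfs streams a working container's root filesystem as a tar
// archive, dropping setuid/setgid bits on the way out.
func ExportRootfs(b *buildah.Builder, w io.Writer) error {
	rc, errChan, err := b.ExtractRootfs(buildah.CommitOptions{}, buildah.ExtractRootfsOptions{
		StripSetuidBit: true,
		StripSetgidBit: true,
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	if _, err := io.Copy(w, rc); err != nil {
		return err
	}
	// the background copier goroutine reports its own status
	return <-errChan
}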
+func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { var uidMap, gidMap []idtools.IDMap mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) if err != nil { @@ -164,8 +176,11 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, chan error, error) { uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) } copierOptions := copier.GetOptions{ - UIDMap: uidMap, - GIDMap: gidMap, + UIDMap: uidMap, + GIDMap: gidMap, + StripSetuidBit: opts.StripSetuidBit, + StripSetgidBit: opts.StripSetgidBit, + StripXattrs: opts.StripXattrs, } err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) errChan <- err @@ -239,6 +254,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, Versioned: specs.Versioned{ SchemaVersion: 2, }, + MediaType: v1.MediaTypeImageManifest, Config: v1.Descriptor{ MediaType: v1.MediaTypeImageConfig, }, @@ -375,7 +391,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System var errChan chan error if i.squash { // Extract the root filesystem as a single layer. - rc, errChan, err = i.extractRootfs() + rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{}) if err != nil { return nil, err } @@ -737,7 +753,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil } -func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, error) { +func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) if err != nil { @@ -752,6 +768,10 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err if manifestType == "" { manifestType = define.OCIv1ImageManifest } + + for _, u := range options.UnsetEnvs { + b.UnsetEnv(u) + } oconfig, err := json.Marshal(&b.OCIv1) if err != nil { return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1) @@ -808,3 +828,12 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err } return ref, nil } + +// Extract the container's whole filesystem as if it were a single layer from current builder instance +func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { + src, err := b.makeContainerImageRef(options) + if err != nil { + return nil, nil, errors.Wrapf(err, "error creating image reference for container %q to extract its contents", b.ContainerID) + } + return src.extractRootfs(opts) +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index bdb407885b2..cf0a7cfbacf 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -10,15 +10,19 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "sync" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/util" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/shortnames" istorage "github.com/containers/image/v5/storage" 
"github.com/containers/image/v5/types" "github.com/containers/storage" @@ -58,6 +62,10 @@ type BuildOptions = define.BuildOptions // returns the ID of the built image, and if a name was assigned to it, a // canonical reference for that image. func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (id string, ref reference.Canonical, err error) { + if options.CommonBuildOpts == nil { + options.CommonBuildOpts = &define.CommonBuildOptions{} + } + if len(paths) == 0 { return "", nil, errors.Errorf("error building: no dockerfiles specified") } @@ -168,8 +176,17 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B files = append(files, b.Bytes()) } - if options.Jobs != nil && *options.Jobs != 0 { - options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs)) + if options.JobSemaphore == nil { + if options.Jobs != nil { + if *options.Jobs < 0 { + return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") + } + if *options.Jobs > 0 { + options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs)) + } + } else { + options.JobSemaphore = semaphore.NewWeighted(1) + } } manifestList := options.Manifest @@ -193,22 +210,44 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B }) } + if options.AllPlatforms { + options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext) + if err != nil { + return "", nil, err + } + } + systemContext := options.SystemContext for _, platform := range options.Platforms { platformContext := *systemContext - platformContext.OSChoice = platform.OS - platformContext.ArchitectureChoice = platform.Arch - platformContext.VariantChoice = platform.Variant + platformSpec := platforms.Normalize(v1.Platform{ + OS: platform.OS, + Architecture: platform.Arch, + Variant: platform.Variant, + }) + // platforms.Normalize converts an empty os value to GOOS + // so we have to check the original value here to not overwrite the default for no reason + if platform.OS != "" { + platformContext.OSChoice = platformSpec.OS + } + if platform.Arch != "" { + platformContext.ArchitectureChoice = platformSpec.Architecture + platformContext.VariantChoice = platformSpec.Variant + } platformOptions := options platformOptions.SystemContext = &platformContext + platformOptions.OS = platformContext.OSChoice + platformOptions.Architecture = platformContext.ArchitectureChoice logPrefix := "" if len(options.Platforms) > 1 { - logPrefix = "[" + platform.OS + "/" + platform.Arch - if platform.Variant != "" { - logPrefix += "/" + platform.Variant - } - logPrefix += "] " + logPrefix = "[" + platforms.Format(platformSpec) + "] " + } + // Deep copy args to prevent concurrent read/writes over Args. 
+ argsCopy := make(map[string]string) + for key, value := range options.Args { + argsCopy[key] = value } + platformOptions.Args = argsCopy builds.Go(func() error { thisID, thisRef, err := buildDockerfilesOnce(ctx, store, logger, logPrefix, platformOptions, paths, files) if err != nil { @@ -217,12 +256,8 @@ id, ref = thisID, thisRef instancesLock.Lock() instances = append(instances, instance{ - ID: thisID, - Platform: v1.Platform{ - OS: platformContext.OSChoice, - Architecture: platformContext.ArchitectureChoice, - Variant: platformContext.VariantChoice, - }, + ID: thisID, + Platform: platformSpec, }) instancesLock.Unlock() return nil @@ -311,6 +346,40 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr warnOnUnsetBuildArgs(logger, mainNode, options.Args) + // --platform was explicitly selected for this build, + // so set the correct TARGETPLATFORM in args if it was not + // already set by the user. + if options.SystemContext.OSChoice != "" && options.SystemContext.ArchitectureChoice != "" { + // os component from --platform string populates TARGETOS + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETOS"]; !ok { + options.Args["TARGETOS"] = options.SystemContext.OSChoice + } + // arch component from --platform string populates TARGETARCH + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETARCH"]; !ok { + options.Args["TARGETARCH"] = options.SystemContext.ArchitectureChoice + } + // variant component from --platform string populates TARGETVARIANT + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETVARIANT"]; !ok { + if options.SystemContext.VariantChoice != "" { + options.Args["TARGETVARIANT"] = options.SystemContext.VariantChoice + } + } + // buildkit parity: give priority to user's `--build-arg` + if _, ok := options.Args["TARGETPLATFORM"]; !ok { + // buildkit parity: TARGETPLATFORM should always be created + // from SystemContext and not from `TARGETOS` and `TARGETARCH`, because + // users can always override the values of `TARGETOS` and `TARGETARCH`, + // but `TARGETPLATFORM` should be set independent of those values. + options.Args["TARGETPLATFORM"] = options.SystemContext.OSChoice + "/" + options.SystemContext.ArchitectureChoice + if options.SystemContext.VariantChoice != "" { + options.Args["TARGETPLATFORM"] = options.Args["TARGETPLATFORM"] + "/" + options.SystemContext.VariantChoice + } + } + } + for i, d := range dockerfilecontents[1:] { additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) if err != nil { @@ -318,6 +387,35 @@ } mainNode.Children = append(mainNode.Children, additionalNode.Children...) } + + // Check if any modifications were made to labels; + // if so, add them to the main node as regular steps + // so they become a regular layer. + // Reason: Docker adds label modification as the + // last step, which can be processed like regular + // steps, and if no modification is made to the layers, + // it's easier to re-use cached layers.
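The TARGETOS/TARGETARCH/TARGETVARIANT/TARGETPLATFORM population above follows a single rule: a user-supplied --build-arg always wins. A compact sketch of that precedence, with a hypothetical helper and made-up values:

package main

import "fmt"

// setDefaultArg mirrors the pattern above: only set a build arg when the
// user has not already provided it.
func setDefaultArg(args map[string]string, key, value string) {
	if _, ok := args[key]; !ok {
		args[key] = value
	}
}

func main() {
	// as if the user ran: --platform linux/arm/v7 --build-arg TARGETOS=windows
	args := map[string]string{"TARGETOS": "windows"}
	setDefaultArg(args, "TARGETOS", "linux")              // kept: windows
	setDefaultArg(args, "TARGETARCH", "arm")              // set
	setDefaultArg(args, "TARGETVARIANT", "v7")            // set
	setDefaultArg(args, "TARGETPLATFORM", "linux/arm/v7") // set from --platform, not from TARGETOS
	fmt.Println(args["TARGETOS"], args["TARGETPLATFORM"]) // windows linux/arm/v7
}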
+ if len(options.Labels) > 0 { + for _, labelSpec := range options.Labels { + label := strings.SplitN(labelSpec, "=", 2) + labelLine := "" + key := label[0] + value := "" + if len(label) > 1 { + value = label[1] + } + // check from only empty key since docker supports empty value + if key != "" { + labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value) + additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine)) + if err != nil { + return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps") + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + } + } + exec, err := newExecutor(logger, logPrefix, store, options, mainNode) if err != nil { return "", nil, errors.Wrapf(err, "error creating build executor") @@ -373,8 +471,8 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string cppCommand := "cpp" cppPath, err := exec.LookPath(cppCommand) if err != nil { - if os.IsNotExist(err) { - err = errors.Errorf("error: %s support requires %s to be installed", containerfile, cppPath) + if errors.Is(err, exec.ErrNotFound) { + err = fmt.Errorf("error: %v: .in support requires %s to be installed", err, cppCommand) } return nil, err } @@ -400,3 +498,194 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string } return &stdoutBuffer, nil } + +// platformsForBaseImages resolves the names of base images from the +// dockerfiles, and if they are all valid references to manifest lists, returns +// the list of platforms that are supported by all of the base images. +func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) { + baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args) + if err != nil { + return nil, errors.Wrapf(err, "determining list of base images") + } + logrus.Debugf("unresolved base images: %v", baseImages) + if len(baseImages) == 0 { + return nil, errors.Wrapf(err, "build uses no non-scratch base images") + } + targetPlatforms := make(map[string]struct{}) + var platformList []struct{ OS, Arch, Variant string } + for baseImageIndex, baseImage := range baseImages { + resolved, err := shortnames.Resolve(systemContext, baseImage) + if err != nil { + return nil, errors.Wrapf(err, "resolving image name %q", baseImage) + } + var manifestBytes []byte + var manifestType string + for _, candidate := range resolved.PullCandidates { + ref, err := docker.NewReference(candidate.Value) + if err != nil { + logrus.Debugf("parsing image reference %q: %v", candidate.Value.String(), err) + continue + } + src, err := ref.NewImageSource(ctx, systemContext) + if err != nil { + logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err) + continue + } + candidateBytes, candidateType, err := src.GetManifest(ctx, nil) + _ = src.Close() + if err != nil { + logrus.Debugf("reading image manifest for %q: %v", baseImage, err) + continue + } + if !manifest.MIMETypeIsMultiImage(candidateType) { + logrus.Debugf("base image %q is not a reference to a manifest list: %v", baseImage, err) + continue + } + if err := candidate.Record(); err != nil { + logrus.Debugf("error recording name %q for base image %q: %v", candidate.Value.String(), baseImage, err) + continue + } + baseImage = candidate.Value.String() + manifestBytes, manifestType = candidateBytes, candidateType + break + } + if 
len(manifestBytes) == 0 { + if len(resolved.PullCandidates) > 0 { + return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage) + } + return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage) + } + if manifestType != v1.MediaTypeImageIndex { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage) + } + list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex) + if err != nil { + return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage) + } + manifestBytes, err = list.Serialize() + if err != nil { + return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage) + } + } + index, err := manifest.OCI1IndexFromManifest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage) + } + if baseImageIndex == 0 { + // populate the list with the first image's normalized platforms + for _, instance := range index.Manifests { + if instance.Platform == nil { + continue + } + platform := platforms.Normalize(*instance.Platform) + targetPlatforms[platforms.Format(platform)] = struct{}{} + logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform)) + } + } else { + // prune the list of any normalized platforms this base image doesn't support + imagePlatforms := make(map[string]struct{}) + for _, instance := range index.Manifests { + if instance.Platform == nil { + continue + } + platform := platforms.Normalize(*instance.Platform) + imagePlatforms[platforms.Format(platform)] = struct{}{} + logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform)) + } + var removed []string + for platform := range targetPlatforms { + if _, present := imagePlatforms[platform]; !present { + removed = append(removed, platform) + logger.Debugf("image %q does not support %q", baseImage, platform) + } + } + for _, remove := range removed { + delete(targetPlatforms, remove) + } + } + if baseImageIndex == len(baseImages)-1 && len(targetPlatforms) > 0 { + // extract the list + for platform := range targetPlatforms { + platform, err := platforms.Parse(platform) + if err != nil { + return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform) + } + platformList = append(platformList, struct{ OS, Arch, Variant string }{ + OS: platform.OS, + Arch: platform.Architecture, + Variant: platform.Variant, + }) + logger.Debugf("base images all support %q", platform) + } + } + } + if len(platformList) == 0 { + return nil, errors.New("base images have no platforms in common") + } + return platformList, nil +} + +// baseImages parses the dockerfilecontents, possibly replacing the first +// stage's base image with FROM, and returns the list of base images as +// provided. Each entry in the dockerfilenames slice corresponds to a slice in +// dockerfilecontents. 
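platformsForBaseImages above is essentially a set intersection over normalized platform strings: seed the set from the first base image's manifest list, then prune anything a later base image does not offer. A reduced model with made-up platform sets:

package main

import "fmt"

// prune drops every platform from target that supported lacks, mirroring
// the per-base-image pruning pass above.
func prune(target, supported map[string]struct{}) {
	for p := range target {
		if _, ok := supported[p]; !ok {
			delete(target, p) // deleting during range is safe in Go
		}
	}
}

func main() {
	target := map[string]struct{}{"linux/amd64": {}, "linux/arm64": {}, "linux/s390x": {}}
	prune(target, map[string]struct{}{"linux/amd64": {}, "linux/arm64": {}})
	for p := range target {
		fmt.Println(p) // linux/amd64 and linux/arm64, in map order
	}
}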
+func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) { + mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0])) + if err != nil { + return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0]) + } + + for i, d := range dockerfilecontents[1:] { + additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) + if err != nil { + return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i]) + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + + b := imagebuilder.NewBuilder(args) + defaultContainerConfig, err := config.Default() + if err != nil { + return nil, errors.Wrapf(err, "failed to get container config") + } + b.Env = defaultContainerConfig.GetDefaultEnv() + stages, err := imagebuilder.NewStages(mainNode, b) + if err != nil { + return nil, errors.Wrap(err, "error reading multiple stages") + } + var baseImages []string + nicknames := make(map[string]bool) + for stageIndex, stage := range stages { + node := stage.Node // first line + for node != nil { // each line + for _, child := range node.Children { // tokens on this line, though we only care about the first + switch strings.ToUpper(child.Value) { // first token - instruction + case "FROM": + if child.Next != nil { // second token on this line + // If we have a fromOverride, replace the value of + // image name for the first FROM in the Containerfile. + if from != "" { + child.Next.Value = from + from = "" + } + base := child.Next.Value + if base != "scratch" && !nicknames[base] { + // TODO: this didn't undergo variable and arg + // expansion, so if the AS clause in another + // FROM instruction uses argument values, + // we might not record the right value here. + baseImages = append(baseImages, base) + } + } + } + } + node = node.Next // next line + } + if stage.Name != strconv.Itoa(stageIndex) { + nicknames[stage.Name] = true + } + } + return baseImages, nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 78606d2b4ec..6b63b5162bd 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -18,6 +18,7 @@ import ( "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" "github.com/containers/common/libimage" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -57,6 +58,7 @@ var builtinAllowedBuildArgs = map[string]bool{ // interface. It coordinates the entire build by using one or more // StageExecutors to handle each stage of the build. type Executor struct { + containerSuffix string logger *logrus.Logger stages map[string]*StageExecutor store storage.Store @@ -84,47 +86,57 @@ type Executor struct { configureNetwork define.NetworkConfigurationPolicy cniPluginPath string cniConfigDir string - idmappingOptions *define.IDMappingOptions - commonBuildOptions *define.CommonBuildOptions - defaultMountsFilePath string - iidfile string - squash bool - labels []string - annotations []string - layers bool - useCache bool - removeIntermediateCtrs bool - forceRmIntermediateCtrs bool - imageMap map[string]string // Used to map images that we create to handle the AS construct. 
- containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. - baseMap map[string]bool // Holds the names of every base image, as given. - rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. - blobDirectory string - excludes []string - unusedArgs map[string]struct{} - capabilities []string - devices define.ContainerDevices - signBy string - architecture string - timestamp *time.Time - os string - maxPullPushRetries int - retryPullPushDelay time.Duration - ociDecryptConfig *encconfig.DecryptConfig - lastError error - terminatedStage map[string]error - stagesLock sync.Mutex - stagesSemaphore *semaphore.Weighted - jobs int - logRusage bool - rusageLogFile io.Writer - imageInfoLock sync.Mutex - imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs - fromOverride string - manifest string - secrets map[string]string - sshsources map[string]*sshagent.Source - logPrefix string + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + networkInterface nettypes.ContainerNetwork + idmappingOptions *define.IDMappingOptions + commonBuildOptions *define.CommonBuildOptions + defaultMountsFilePath string + iidfile string + squash bool + labels []string + annotations []string + layers bool + noHosts bool + useCache bool + removeIntermediateCtrs bool + forceRmIntermediateCtrs bool + imageMap map[string]string // Used to map images that we create to handle the AS construct. + containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. + baseMap map[string]bool // Holds the names of every base image, as given. + rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. 
+ blobDirectory string + excludes []string + ignoreFile string + unusedArgs map[string]struct{} + capabilities []string + devices define.ContainerDevices + signBy string + architecture string + timestamp *time.Time + os string + maxPullPushRetries int + retryPullPushDelay time.Duration + ociDecryptConfig *encconfig.DecryptConfig + lastError error + terminatedStage map[string]error + stagesLock sync.Mutex + stagesSemaphore *semaphore.Weighted + logRusage bool + rusageLogFile io.Writer + imageInfoLock sync.Mutex + imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs + fromOverride string + manifest string + secrets map[string]define.Secret + sshsources map[string]*sshagent.Source + logPrefix string + unsetEnvs []string + processLabel string // Shares processLabel of first stage container with containers of other stages in same build + mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build + buildOutput string // Specifies instructions for any custom build output + osVersion string + osFeatures []string + envs []string } type imageTypeAndHistoryAndDiffIDs struct { @@ -143,7 +155,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o excludes := options.Excludes if len(excludes) == 0 { - excludes, err = imagebuilder.ParseDockerignore(options.ContextDirectory) + excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile) if err != nil { return nil, err } @@ -179,10 +191,6 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o if err != nil { return nil, err } - jobs := 1 - if options.Jobs != nil { - jobs = *options.Jobs - } writer := options.ReportWriter if options.Quiet { @@ -203,11 +211,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } exec := Executor{ + containerSuffix: options.ContainerSuffix, logger: logger, stages: make(map[string]*StageExecutor), store: store, contextDir: options.ContextDirectory, excludes: excludes, + ignoreFile: options.IgnoreFile, pullPolicy: options.PullPolicy, registry: options.Registry, ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, @@ -231,6 +241,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o configureNetwork: options.ConfigureNetwork, cniPluginPath: options.CNIPluginPath, cniConfigDir: options.CNIConfigDir, + networkInterface: options.NetworkInterface, idmappingOptions: options.IDMappingOptions, commonBuildOptions: options.CommonBuildOpts, defaultMountsFilePath: options.DefaultMountsFilePath, @@ -239,6 +250,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o labels: append([]string{}, options.Labels...), annotations: append([]string{}, options.Annotations...), layers: options.Layers, + noHosts: options.CommonBuildOpts.NoHosts, useCache: !options.NoCache, removeIntermediateCtrs: options.RemoveIntermediateCtrs, forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, @@ -259,7 +271,6 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o ociDecryptConfig: options.OciDecryptConfig, terminatedStage: make(map[string]error), stagesSemaphore: options.JobSemaphore, - jobs: jobs, logRusage: options.LogRusage, rusageLogFile: rusageLogFile, imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs), @@ -268,6 +279,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o secrets: secrets, sshsources: sshsources, 
logPrefix: logPrefix, + unsetEnvs: append([]string{}, options.UnsetEnvs...), + buildOutput: options.BuildOutput, + osVersion: options.OSVersion, + osFeatures: append([]string{}, options.OSFeatures...), + envs: append([]string{}, options.Envs...), } if exec.err == nil { exec.err = os.Stderr @@ -292,9 +308,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // and value, or just an argument, since they can be // separated by either "=" or whitespace. list := strings.SplitN(arg.Value, "=", 2) - if _, stillUnused := exec.unusedArgs[list[0]]; stillUnused { - delete(exec.unusedArgs, list[0]) - } + delete(exec.unusedArgs, list[0]) } } break @@ -353,7 +367,7 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) { found := false for _, otherStage := range stages { - if otherStage.Name == name || fmt.Sprintf("%d", otherStage.Position) == name { + if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name { found = true break } @@ -521,9 +535,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image lastErr = err } } + cleanupStages = nil b.stagesLock.Unlock() - cleanupStages = nil // Clean up any builders that we used to get data from images. for _, builder := range b.containerMap { if err := builder.Delete(); err != nil { @@ -595,11 +609,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image } base := child.Next.Value if base != "scratch" { - // TODO: this didn't undergo variable and arg - // expansion, so if the AS clause in another - // FROM instruction uses argument values, - // we might not record the right value here. - b.baseMap[base] = true + userArgs := argsMapToSlice(stage.Builder.Args) + baseWithArg, err := imagebuilder.ProcessWord(base, userArgs) + if err != nil { + return "", nil, errors.Wrapf(err, "while replacing arg variables with values for format %q", base) + } + b.baseMap[baseWithArg] = true logrus.Debugf("base for stage %d: %q", stageIndex, base) } } @@ -628,36 +643,53 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image Error error } - ch := make(chan Result) + ch := make(chan Result, len(stages)) if b.stagesSemaphore == nil { - jobs := int64(b.jobs) - if jobs < 0 { - return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") - } else if jobs == 0 { - jobs = int64(len(stages)) - } - - b.stagesSemaphore = semaphore.NewWeighted(jobs) + b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages))) } var wg sync.WaitGroup wg.Add(len(stages)) go func() { + cancel := false for stageIndex := range stages { index := stageIndex // Acquire the semaphore before creating the goroutine so we are sure they // run in the specified order. 
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + cancel = true b.lastError = err - return + ch <- Result{ + Index: index, + Error: err, + } + wg.Done() + continue } + b.stagesLock.Lock() + cleanupStages := cleanupStages + b.stagesLock.Unlock() go func() { defer b.stagesSemaphore.Release(1) defer wg.Done() + if cancel || cleanupStages == nil { + var err error + if stages[index].Name != strconv.Itoa(index) { + err = errors.Errorf("not building stage %d: build canceled", index) + } else { + err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name) + } + ch <- Result{ + Index: index, + Error: err, + } + return + } stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index) if stageErr != nil { + cancel = true ch <- Result{ Index: index, Error: stageErr, @@ -684,7 +716,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image b.stagesLock.Lock() b.terminatedStage[stage.Name] = r.Error - b.terminatedStage[fmt.Sprintf("%d", stage.Position)] = r.Error + b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error if r.Error != nil { b.stagesLock.Unlock() diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index ad0caed2877..01b70369bb9 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -15,8 +15,12 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" buildahdocker "github.com/containers/buildah/docker" + "github.com/containers/buildah/internal" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/rusage" "github.com/containers/buildah/util" + config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -25,6 +29,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/unshare" docker "github.com/fsouza/go-dockerclient" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -47,21 +52,22 @@ import ( // If we're naming the result of the build, only the last stage will apply that // name to the image that it produces. 
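On the Build scheduling changes above: the results channel is now buffered to len(stages), so a failing or canceled stage can always post its result without a waiting reader, and the semaphore is acquired before spawning each goroutine so stages start in order. A reduced model of that fan-out, with the per-stage work stubbed out:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	stages := []string{"base", "build", "final"}
	sem := semaphore.NewWeighted(1)     // like --jobs=1
	ch := make(chan error, len(stages)) // buffered: senders never block
	var wg sync.WaitGroup
	wg.Add(len(stages))
	for i := range stages {
		// acquire before spawning so stages start in the specified order
		if err := sem.Acquire(context.Background(), 1); err != nil {
			ch <- err
			wg.Done()
			continue
		}
		go func(i int) {
			defer sem.Release(1)
			defer wg.Done()
			ch <- nil // a real executor would build stage i here
		}(i)
	}
	wg.Wait()
	close(ch)
	for err := range ch {
		fmt.Println("stage result:", err)
	}
}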
 type StageExecutor struct {
-	ctx             context.Context
-	executor        *Executor
-	log             func(format string, args ...interface{})
-	index           int
-	stages          imagebuilder.Stages
-	name            string
-	builder         *buildah.Builder
-	preserved       int
-	volumes         imagebuilder.VolumeSet
-	volumeCache     map[string]string
-	volumeCacheInfo map[string]os.FileInfo
-	mountPoint      string
-	output          string
-	containerIDs    []string
-	stage           *imagebuilder.Stage
+	ctx                   context.Context
+	executor              *Executor
+	log                   func(format string, args ...interface{})
+	index                 int
+	stages                imagebuilder.Stages
+	name                  string
+	builder               *buildah.Builder
+	preserved             int
+	volumes               imagebuilder.VolumeSet
+	volumeCache           map[string]string
+	volumeCacheInfo       map[string]os.FileInfo
+	mountPoint            string
+	output                string
+	containerIDs          []string
+	stage                 *imagebuilder.Stage
+	argsFromContainerfile []string
 }
 
 // Preserve informs the stage executor that from this point on, it needs to
@@ -401,6 +407,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 			PreserveOwnership: preserveOwnership,
 			ContextDir:        contextDir,
 			Excludes:          copyExcludes,
+			IgnoreFile:        s.executor.ignoreFile,
 			IDMappingOptions:  idMappingOptions,
 			StripSetuidBit:    stripSetuid,
 			StripSetgidBit:    stripSetgid,
@@ -412,10 +419,67 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
 	return nil
 }
 
+// runStageMountPoints returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with a from is provided.
+// Mounts set up here are automatically cleaned up when the stage is removed.
+// It checks whether RUN contains `--mount` with `from`; if so, it pre-mounts images or stages from the executor for Run.
+// Stages mounted here will be used by Run().
+func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
+	stageMountPoints := make(map[string]internal.StageMountDetails)
+	for _, flag := range mountList {
+		if strings.Contains(flag, "from") {
+			arr := strings.SplitN(flag, ",", 2)
+			if len(arr) < 2 {
+				return nil, errors.Errorf("Invalid --mount command: %s", flag)
+			}
+			tokens := strings.Split(arr[1], ",")
+			for _, val := range tokens {
+				kv := strings.SplitN(val, "=", 2)
+				switch kv[0] {
+				case "from":
+					if len(kv) == 1 {
+						return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
+					}
+					if kv[1] == "" {
+						return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
+					}
+					from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
+					if fromErr != nil {
+						return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
+					}
+					// If the source's name corresponds to the
+					// result of an earlier stage, wait for that
+					// stage to finish being built.
+					if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
+						return nil, err
+					}
+					if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
+						stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint}
+						break
+					} else {
+						mountPoint, err := s.getImageRootfs(s.ctx, from)
+						if err != nil {
+							return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
+						}
+						stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
+						break
+					}
+				default:
+					continue
+				}
+			}
+		}
+	}
+	return stageMountPoints, nil
+}
+
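A standalone illustration (plain string handling, not buildah API) of how `runStageMountPoints` above tokenizes a `--mount` flag to find its `from=` value; the flag value is made up:

```go
package main

import (
	"fmt"
	"strings"
)

// extractFrom pulls the value of a "from=" token out of a --mount flag,
// mirroring (in simplified form) the SplitN/Split logic used above.
func extractFrom(flag string) (string, bool) {
	for _, token := range strings.Split(flag, ",") {
		kv := strings.SplitN(token, "=", 2)
		if kv[0] == "from" && len(kv) == 2 && kv[1] != "" {
			return kv[1], true
		}
	}
	return "", false
}

func main() {
	from, ok := extractFrom("type=bind,from=builder,target=/src")
	fmt.Println(from, ok) // builder true
}
```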
 // Run executes a RUN instruction using the stage's current working container
 // as a root directory.
 func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
 	logrus.Debugf("RUN %#v, %#v", run, config)
+	stageMountPoints, err := s.runStageMountPoints(run.Mounts)
+	if err != nil {
+		return err
+	}
 	if s.builder == nil {
 		return errors.Errorf("no build container available")
 	}
@@ -433,12 +497,14 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
 		Hostname:         config.Hostname,
 		Runtime:          s.executor.runtime,
 		Args:             s.executor.runtimeArgs,
+		NoHosts:          s.executor.noHosts,
 		NoPivot:          os.Getenv("BUILDAH_NOPIVOT") != "",
 		Mounts:           append([]Mount{}, s.executor.transientMounts...),
 		Env:              config.Env,
 		User:             config.User,
 		WorkingDir:       config.WorkingDir,
 		Entrypoint:       config.Entrypoint,
+		ContextDir:       s.executor.contextDir,
 		Cmd:              config.Cmd,
 		Stdin:            stdin,
 		Stdout:           s.executor.out,
@@ -449,6 +515,8 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
 		Secrets:          s.executor.secrets,
 		SSHSources:       s.executor.sshsources,
 		RunMounts:        run.Mounts,
+		StageMountPoints: stageMountPoints,
+		SystemContext:    s.executor.systemContext,
 	}
 	if config.NetworkDisabled {
 		options.ConfigureNetwork = buildah.NetworkDisabled
@@ -533,20 +601,38 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		}
 	}
 
+	builderSystemContext := s.executor.systemContext
+	// get the platform string from the stage
+	if stage.Builder.Platform != "" {
+		os, arch, variant, err := parse.Platform(stage.Builder.Platform)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform)
+		}
+		if arch != "" || variant != "" {
+			builderSystemContext.ArchitectureChoice = arch
+			builderSystemContext.VariantChoice = variant
+		}
+		if os != "" {
+			builderSystemContext.OSChoice = os
+		}
+	}
+
 	builderOptions := buildah.BuilderOptions{
 		Args:                  ib.Args,
 		FromImage:             from,
 		PullPolicy:            pullPolicy,
+		ContainerSuffix:       s.executor.containerSuffix,
 		Registry:              s.executor.registry,
 		BlobDirectory:         s.executor.blobDirectory,
 		SignaturePolicyPath:   s.executor.signaturePolicyPath,
 		ReportWriter:          s.executor.reportWriter,
-		SystemContext:         s.executor.systemContext,
+		SystemContext:         builderSystemContext,
 		Isolation:             s.executor.isolation,
 		NamespaceOptions:      s.executor.namespaceOptions,
 		ConfigureNetwork:      s.executor.configureNetwork,
 		CNIPluginPath:         s.executor.cniPluginPath,
 		CNIConfigDir:          s.executor.cniConfigDir,
+		NetworkInterface:      s.executor.networkInterface,
 		IDMappingOptions:      s.executor.idmappingOptions,
 		CommonBuildOpts:       s.executor.commonBuildOptions,
 		DefaultMountsFilePath: s.executor.defaultMountsFilePath,
@@ -556,6 +642,9 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		MaxPullRetries:        s.executor.maxPullPushRetries,
 		PullRetryDelay:        s.executor.retryPullPushDelay,
 		OciDecryptConfig:      s.executor.ociDecryptConfig,
+		Logger:                s.executor.logger,
+		ProcessLabel:          s.executor.processLabel,
+		MountLabel:            s.executor.mountLabel,
 	}
 
 	builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
@@ -563,6 +652,16 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 		return nil, errors.Wrapf(err, "error creating build container")
 	}
 
+	// If the executor's ProcessLabel and MountLabel are empty, this is the first stage.
+	// Make sure we share the first stage's ProcessLabel and MountLabel with all subsequent stages,
+	// which ensures that one stage in the same build can mount another stage even if `selinux`
+	// is enabled.
+
+	if s.executor.mountLabel == "" && s.executor.processLabel == "" {
+		s.executor.mountLabel = builder.MountLabel
+		s.executor.processLabel = builder.ProcessLabel
+	}
+
 	if initializeIBConfig {
 		volumes := map[string]struct{}{}
 		for _, v := range builder.Volumes() {
@@ -582,6 +681,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 			Volumes:      volumes,
 			WorkingDir:   builder.WorkDir(),
 			Entrypoint:   builder.Entrypoint(),
+			Healthcheck:  (*docker.HealthConfig)(builder.Healthcheck()),
 			Labels:       builder.Labels(),
 			Shell:        builder.Shell(),
 			StopSignal:   builder.StopSignal(),
@@ -673,8 +773,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 	checkForLayers := s.executor.layers && s.executor.useCache
 	moreStages := s.index < len(s.stages)-1
 	lastStage := !moreStages
-	imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
-	rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
+	imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
+	rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
 
 	// If the base image's name corresponds to the result of an earlier
 	// stage, make sure that stage has finished building an image, and
@@ -971,7 +1071,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		}
 	}
 
-	if cacheID != "" && !(s.executor.squash && lastInstruction) {
+	// We want to save history for other layers during a squashed build.
+	// The toggle flag allows the executor to treat other instructions and layers
+	// as regular builds and only perform the squash at the end.
+	squashToggle := false
+	// Note: If the build has squash, we must try to re-use as many layers as possible if a cache is found.
+	// So only perform the commit if it's the last instruction of the last stage.
+	if cacheID != "" {
 		logCacheHit(cacheID)
 		// A suitable cached image was found, so we can just
 		// reuse it.  If we need to add a name to the resulting
@@ -985,6 +1091,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			}
 		}
 	} else {
+		if s.executor.squash {
+			// We want to save history for other layers during a squashed build.
+			// The squashToggle flag allows the executor to treat other instructions and layers
+			// as regular builds and only perform the squash at the end.
+			s.executor.squash = false
+			squashToggle = true
+		}
 		// We're not going to find any more cache hits, so we
 		// can stop looking for them.
 		checkForLayers = false
@@ -996,6 +1109,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 			return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
 		}
 	}
+
+	// Perform the final squash for this build, as we are on the
+	// last instruction of the last stage.
+	if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
+		s.executor.squash = true
+		imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
+		}
+	}
+
 	logImageID(imgID)
 
 	// Update our working container to be based off of the cached
@@ -1110,10 +1234,15 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
 	}
 	switch strings.ToUpper(node.Value) {
 	case "ARG":
-		buildArgs := s.getBuildArgs()
+		for _, variable := range strings.Fields(node.Original) {
+			if variable != "ARG" {
+				s.argsFromContainerfile = append(s.argsFromContainerfile, variable)
+			}
+		}
+		buildArgs := s.getBuildArgsKey()
 		return "/bin/sh -c #(nop) ARG " + buildArgs
 	case "RUN":
-		buildArgs := s.getBuildArgs()
+		buildArgs := s.getBuildArgsResolvedForRun()
 		if buildArgs != "" {
 			return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
 		}
@@ -1131,10 +1260,71 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
 
 // getBuildArgs returns a string of the build-args specified during the build process
 // it excludes any build-args that were not used in the build process
-func (s *StageExecutor) getBuildArgs() string {
-	buildArgs := s.stage.Builder.Arguments()
-	sort.Strings(buildArgs)
-	return strings.Join(buildArgs, " ")
+// Values for args are overridden by the values specified using ENV,
+// because values from ENV will always override values specified via ARG.
+func (s *StageExecutor) getBuildArgsResolvedForRun() string {
+	var envs []string
+	configuredEnvs := make(map[string]string)
+	dockerConfig := s.stage.Builder.Config()
+
+	for _, env := range dockerConfig.Env {
+		splitv := strings.SplitN(env, "=", 2)
+		if len(splitv) == 2 {
+			configuredEnvs[splitv[0]] = splitv[1]
+		}
+	}
+
+	for key, value := range s.stage.Builder.Args {
+		if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+			// if the value was set via ENV in the image, it is given higher
+			// priority, so embed that value into the build history
+			_, inImage := configuredEnvs[key]
+			if inImage {
+				envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
+			} else {
+				// By default everything must be added to history.
+				// The following variable is set to false only for special cases.
+				addToHistory := true
+
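For RUN instructions, `getCreatedBy` above records the resolved build args using Docker's `|<count> key=value ...` history convention. A small sketch of how that string is assembled (the arg values are made up):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	buildArgs := "FOO=bar HTTP_PROXY=http://proxy.example:3128" // illustrative
	cmd := "make all"                                           // what follows RUN
	createdBy := "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) +
		" " + buildArgs + " /bin/sh -c " + cmd
	fmt.Println(createdBy)
	// |2 FOO=bar HTTP_PROXY=http://proxy.example:3128 /bin/sh -c make all
}
```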
+				// The value being checked here comes from the build-args;
+				// check if this key belongs to any of the predefined allowlist args, e.g. proxy variables,
+				// and if that arg was not manually set in the Containerfile/Dockerfile,
+				// then don't write its value to history.
+				// This behaviour ensures parity with docker/buildkit.
+				for _, variable := range config.ProxyEnv {
+					if key == variable {
+						// found in the predefined args,
+						// so don't add it to history
+						// unless the user declared it explicitly with `ARG`
+						addToHistory = false
+						for _, processedArg := range s.argsFromContainerfile {
+							if key == processedArg {
+								addToHistory = true
+							}
+						}
+					}
+				}
+				if addToHistory {
+					envs = append(envs, fmt.Sprintf("%s=%s", key, value))
+				}
+			}
+		}
+	}
+	sort.Strings(envs)
+	return strings.Join(envs, " ")
+}
+
+// getBuildArgsKey returns the set of arg keys which were specified during the
+// build process; it is used exclusively for the build history.
+func (s *StageExecutor) getBuildArgsKey() string {
+	var envs []string
+	for key := range s.stage.Builder.Args {
+		if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
+			envs = append(envs, key)
+		}
+	}
+	sort.Strings(envs)
+	return strings.Join(envs, " ")
+}
 
 // tagExistingImage adds names to an image already in the store
@@ -1259,8 +1449,18 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
 
 // commit writes the container's contents to an image, using a passed-in tag as
 // the name if there is one, generating a unique ID-based one otherwise.
+// It can also commit via a custom exporter if one is specified.
 func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
 	ib := s.stage.Builder
+	var buildOutputOption define.BuildOutputOption
+	if s.executor.buildOutput != "" {
+		var err error
+		logrus.Debugf("Generating custom build output with options %q", s.executor.buildOutput)
+		buildOutputOption, err = parse.GetBuildOutput(s.executor.buildOutput)
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "failed to parse build output")
+		}
+	}
 	var imageRef types.ImageReference
 	if output != "" {
 		imageRef2, err := s.executor.resolveNameToImageRef(output)
@@ -1285,6 +1485,17 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 	if s.executor.os != "" {
 		s.builder.SetOS(s.executor.os)
 	}
+	if s.executor.osVersion != "" {
+		s.builder.SetOSVersion(s.executor.osVersion)
+	}
+	for _, osFeatureSpec := range s.executor.osFeatures {
+		switch {
+		case strings.HasSuffix(osFeatureSpec, "-"):
+			s.builder.UnsetOSFeature(strings.TrimSuffix(osFeatureSpec, "-"))
+		default:
+			s.builder.SetOSFeature(osFeatureSpec)
+		}
+	}
 	s.builder.SetUser(config.User)
 	s.builder.ClearPorts()
 	for p := range config.ExposedPorts {
@@ -1294,6 +1505,28 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 		spec := strings.SplitN(envSpec, "=", 2)
 		s.builder.SetEnv(spec[0], spec[1])
 	}
+	for _, envSpec := range s.executor.envs {
+		env := strings.SplitN(envSpec, "=", 2)
+		if len(env) > 1 {
+			getenv := func(name string) string {
+				for _, envvar := range s.builder.Env() {
+					val := strings.SplitN(envvar, "=", 2)
+					if len(val) == 2 && val[0] == name {
+						return val[1]
+					}
+				}
+				logrus.Errorf("error expanding variable %q: no value set in image", name)
+				return name
+			}
+			env[1] = os.Expand(env[1], getenv)
+			s.builder.SetEnv(env[0], env[1])
+		} else {
+			s.builder.SetEnv(env[0], os.Getenv(env[0]))
+		}
+	}
+	for _, envSpec := range s.executor.unsetEnvs {
+		s.builder.UnsetEnv(envSpec)
+	}
 	s.builder.SetCmd(config.Cmd)
 	s.builder.ClearVolumes()
 	for v := range config.Volumes {
@@ -1323,6 +1556,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 	for k, v := range config.Labels {
 		s.builder.SetLabel(k, v)
 	}
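The `--env` handling in `commit` (shown above) expands `$VAR` references against the working container's environment via `os.Expand` with a lookup closure. A self-contained sketch of the same pattern, using hypothetical environment values:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	imageEnv := []string{"PATH=/usr/bin", "HOME=/root"} // stand-in for s.builder.Env()
	getenv := func(name string) string {
		for _, envvar := range imageEnv {
			val := strings.SplitN(envvar, "=", 2)
			if len(val) == 2 && val[0] == name {
				return val[1]
			}
		}
		return name // same fallback the hunk uses when no value is set
	}
	fmt.Println(os.Expand("bin is ${PATH}", getenv)) // bin is /usr/bin
}
```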
+	if s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolUndefined || s.executor.commonBuildOptions.IdentityLabel == types.OptionalBoolTrue {
+		s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
+	}
 	for _, labelSpec := range s.executor.labels {
 		label := strings.SplitN(labelSpec, "=", 2)
 		if len(label) > 1 {
@@ -1331,7 +1567,6 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 			s.builder.SetLabel(label[0], "")
 		}
 	}
-	s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
 	for _, annotationSpec := range s.executor.annotations {
 		annotation := strings.SplitN(annotationSpec, "=", 2)
 		if len(annotation) > 1 {
@@ -1365,6 +1600,39 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 		HistoryTimestamp:      s.executor.timestamp,
 		Manifest:              s.executor.manifest,
 	}
+	// generate the build output
+	if s.executor.buildOutput != "" {
+		extractRootfsOpts := buildah.ExtractRootfsOptions{}
+		if unshare.IsRootless() {
+			// In order to maintain as much parity as possible
+			// with buildkit's version of --output, and to avoid
+			// unsafe invocation of exported executables, it was
+			// decided to strip the setuid and setgid bits and
+			// extended attributes. Since modes like setuid and
+			// setgid leave room for an executable to be invoked
+			// with different file-system permissions, it's safer
+			// to strip them off for an unprivileged invocation.
+			// See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
+			extractRootfsOpts.StripSetuidBit = true
+			extractRootfsOpts.StripSetgidBit = true
+			extractRootfsOpts.StripXattrs = true
+		}
+		rc, errChan, err := s.builder.ExtractRootfs(options, extractRootfsOpts)
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "failed to extract rootfs from given container image")
+		}
+		defer rc.Close()
+		err = internalUtil.ExportFromReader(rc, buildOutputOption)
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "failed to export build output")
+		}
+		if errChan != nil {
+			err = <-errChan
+			if err != nil {
+				return "", nil, err
+			}
+		}
+	}
 	imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
 	if err != nil {
 		return "", nil, err
@@ -1381,5 +1649,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
 }
 
 func (s *StageExecutor) EnsureContainerPath(path string) error {
-	return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{})
+	return s.builder.EnsureContainerPathAs(path, "", nil)
+}
+
+func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
+	return s.builder.EnsureContainerPathAs(path, user, mode)
 }
diff --git a/vendor/github.com/containers/buildah/imagebuildah/util.go b/vendor/github.com/containers/buildah/imagebuildah/util.go
index 598e407a86d..90c018fa497 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/util.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/util.go
@@ -10,3 +10,13 @@ import (
 func InitReexec() bool {
 	return buildah.InitReexec()
 }
+
+// argsMapToSlice returns the contents of a map[string]string as a slice of keys
+// and values joined with "=".
+func argsMapToSlice(m map[string]string) []string { + s := make([]string, 0, len(m)) + for k, v := range m { + s = append(s, k+"="+v) + } + return s +} diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 0029a95e27b..a4fcee47686 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -83,6 +83,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system return nil, err } + netInt, err := getNetworkInterface(store, "", "") + if err != nil { + return nil, err + } + builder := &Builder{ store: store, Type: containerType, @@ -100,6 +105,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system UIDMap: uidmap, GIDMap: gidmap, }, + NetworkInterface: netInt, } if err := builder.initConfig(ctx, image, systemContext); err != nil { diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index f0bf92ddf33..12b69c9ba60 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -11,10 +11,12 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/util" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,8 +45,10 @@ func Info(store storage.Store) ([]InfoData, error) { func hostInfo() map[string]interface{} { info := map[string]interface{}{} - info["os"] = runtime.GOOS - info["arch"] = runtime.GOARCH + ps := platforms.Normalize(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH}) + info["os"] = ps.OS + info["arch"] = ps.Architecture + info["variant"] = ps.Variant info["cpus"] = runtime.NumCPU() info["rootless"] = unshare.IsRootless() diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 2a09821f3c1..02a81be6fb8 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -20,32 +20,6 @@ lags the upstream release. sudo yum -y install buildah ``` -The [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable) -provides updated packages for CentOS 8 and CentOS 8 Stream. - -```bash -# CentOS 8 -sudo dnf -y module disable container-tools -sudo dnf -y install 'dnf-command(copr)' -sudo dnf -y copr enable rhcontainerbot/container-selinux -sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo -# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc -sudo dnf -y --refresh install runc -# Install Buildah -sudo dnf -y --refresh install buildah - -# CentOS 8 Stream -sudo dnf -y module disable container-tools -sudo dnf -y install 'dnf-command(copr)' -sudo dnf -y copr enable rhcontainerbot/container-selinux -sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo -# OPTIONAL FOR RUNC USERS: crun will be installed by default. 
Install runc first if you prefer runc -sudo dnf -y --refresh install runc -# Install Buildah -sudo dnf -y --refresh install buildah -``` - - #### [Debian](https://debian.org) The buildah package is available in @@ -127,26 +101,6 @@ sudo apt-get -y update sudo apt-get -y install buildah ``` -If you would prefer newer (though not as well-tested) packages, -the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah) -provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS). -The packages in Kubic project repos are more frequently updated than the one in Ubuntu's official repositories, due to how Debian/Ubuntu works. -Checkout the Kubic project page for a list of supported Ubuntu version and architecture combinations. -The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian). - -CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo -OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts. - - -```bash -. /etc/os-release -sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${ID^}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${ID^}_${VERSION_ID}/Release.key -O Release.key -sudo apt-key add - < Release.key -sudo apt-get update -qq -sudo apt-get -qq -y install buildah -``` - # Building from scratch ## System Requirements @@ -254,9 +208,7 @@ Then to install Buildah on Fedora follow the steps in this example: ### RHEL, CentOS -In RHEL and CentOS 7, ensure that you are subscribed to the `rhel-7-server-rpms`, -`rhel-7-server-extras-rpms`, `rhel-7-server-optional-rpms` and `EPEL` repositories, then -run this command: +In RHEL and CentOS, run this command to install the build dependencies: ``` yum -y install \ @@ -278,11 +230,6 @@ run this command: The build steps for Buildah on RHEL or CentOS are the same as for Fedora, above. -*NOTE:* Buildah on RHEL or CentOS version 7.* is not supported running as non-root due to -these systems not having newuidmap or newgidmap installed. It is possible to pull -the shadow-utils source RPM from Fedora 29 and build and install from that in order to -run Buildah as non-root on these systems. 
-
 ### openSUSE
 
 On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go
new file mode 100644
index 00000000000..ec463821547
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/parse/parse.go
@@ -0,0 +1,608 @@
+package parse
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/containers/buildah/internal"
+	internalUtil "github.com/containers/buildah/internal/util"
+	"github.com/containers/common/pkg/parse"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/lockfile"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+const (
+	// TypeBind is the type for mounting a host dir
+	TypeBind = "bind"
+	// TypeTmpfs is the type for mounting tmpfs
+	TypeTmpfs = "tmpfs"
+	// TypeCache is the type for mounting a common persistent cache from the host
+	TypeCache = "cache"
+	// mount=type=cache must create a persistent directory on the host so it's available for all consecutive builds.
+	// The lifecycle of this directory is inherited from how the host machine treats its temporary directory
+	BuildahCacheDir = "buildah-cache"
+	// mount=type=cache allows users to lock a cache store while it's being used by another build
+	BuildahCacheLockfile = "buildah-cache-lockfile"
+)
+
+var (
+	errBadMntOption  = errors.New("invalid mount option")
+	errBadOptionArg  = errors.New("must provide an argument for option")
+	errBadVolDest    = errors.New("must set volume destination")
+	errBadVolSrc     = errors.New("must set volume source")
+	errDuplicateDest = errors.New("duplicate mount destination")
+)
+
+// GetBindMount parses a single bind mount entry from the --mount flag.
+// It returns the specified mount and a string containing the name of the image we mounted; otherwise the string is empty.
+// The caller is expected to unmount any mounted images
+func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) {
+	newMount := specs.Mount{
+		Type: TypeBind,
+	}
+
+	mountReadability := false
+	setDest := false
+	bindNonRecursive := false
+	fromImage := ""
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "bind-nonrecursive":
+			newMount.Options = append(newMount.Options, "bind")
+			bindNonRecursive = true
+		case "ro", "nosuid", "nodev", "noexec":
+			// TODO: detect duplication of these options.
+			// (Is this necessary?)
+			newMount.Options = append(newMount.Options, kv[0])
+			mountReadability = true
+		case "rw", "readwrite":
+			newMount.Options = append(newMount.Options, "rw")
+			mountReadability = true
+		case "readonly":
+			// Alias for "ro"
+			newMount.Options = append(newMount.Options, "ro")
+			mountReadability = true
+		case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
+			newMount.Options = append(newMount.Options, kv[0])
+		case "from":
+			if len(kv) == 1 {
+				return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			fromImage = kv[1]
+		case "bind-propagation":
+			if len(kv) == 1 {
+				return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, kv[1])
+		case "src", "source":
+			if len(kv) == 1 {
+				return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Source = kv[1]
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+				return newMount, "", err
+			}
+			newMount.Destination = kv[1]
+			setDest = true
+		case "consistency":
+			// Option for OS X only, has no meaning on other platforms
+			// and can thus be safely ignored.
+			// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
+		default:
+			return newMount, "", errors.Wrapf(errBadMntOption, kv[0])
+		}
+	}
+
+	// the default mount readability is always read-only
+	if !mountReadability {
+		newMount.Options = append(newMount.Options, "ro")
+	}
+
+	// isImageMounted ensures that we return the image name only if we performed an additional mount
+	isImageMounted := false
+	if fromImage != "" {
+		mountPoint := ""
+		if additionalMountPoints != nil {
+			if val, ok := additionalMountPoints[fromImage]; ok {
+				mountPoint = val.MountPoint
+			}
+		}
+		// if the image's mountPoint was not found in additionalMountPoints,
+		// or additionalMountPoints was nil, try mounting the image
+		if mountPoint == "" {
+			image, err := internalUtil.LookupImage(ctx, store, fromImage)
+			if err != nil {
+				return newMount, "", err
+			}
+
+			mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
+			if err != nil {
+				return newMount, "", err
+			}
+			isImageMounted = true
+		}
+		contextDir = mountPoint
+	}
+
+	// buildkit parity: the default bind option must be `rbind`
+	// unless specified otherwise
+	if !bindNonRecursive {
+		newMount.Options = append(newMount.Options, "rbind")
+	}
+
+	if !setDest {
+		return newMount, fromImage, errBadVolDest
+	}
+
+	// buildkit parity: support absolute paths for sources from the current build context
+	if contextDir != "" {
+		// the path should be /contextDir/specified path
+		newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source))
+	} else {
+		// it's coming from `buildah run --mount=type=bind`, so allow using an absolute path;
+		// error out if no source is set
+		if newMount.Source == "" {
+			return newMount, "", errBadVolSrc
+		}
+		if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
+			return newMount, "", err
+		}
+	}
+
+	opts, err := parse.ValidateVolumeOpts(newMount.Options)
+	if err != nil {
+		return newMount, fromImage, err
+	}
+	newMount.Options = opts
+
+	if !isImageMounted {
+		// we don't want any cleanups if the image was not mounted explicitly,
+		// so don't return anything
+		fromImage = ""
+	}
+
+	return newMount, fromImage, nil
+}
+
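The `filepath.Join(contextDir, filepath.Clean("/"+src))` idiom used above confines a user-supplied source path to the context (or image mount) directory, neutralizing `..` escapes. A quick demonstration with illustrative paths:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	contextDir := "/work/context" // illustrative
	for _, src := range []string{"app", "../../etc/passwd", "/abs/path"} {
		confined := filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+src))
		fmt.Printf("%-18s -> %s\n", src, confined)
	}
	// app              -> /work/context/app
	// ../../etc/passwd -> /work/context/etc/passwd
	// /abs/path        -> /work/context/abs/path
}
```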
+// GetCacheMount parses a single cache mount entry from the --mount flag.
+func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, []string, error) {
+	var err error
+	var mode uint64
+	lockedTargets := make([]string, 0)
+	var (
+		setDest     bool
+		setShared   bool
+		setReadOnly bool
+	)
+	fromStage := ""
+	newMount := specs.Mount{
+		Type: TypeBind,
+	}
+	// if id is set, a new subdirectory with `id` will be created under $TMPDIR/buildah-cache/id
+	id := ""
+	// buildkit parity: the cache directory defaults to mode 755
+	mode = 0o755
+	// buildkit parity: the cache directory defaults to uid 0 if not specified
+	uid := 0
+	// buildkit parity: the cache directory defaults to gid 0 if not specified
+	gid := 0
+	// sharing mode
+	sharing := "shared"
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "nosuid", "nodev", "noexec":
+			// TODO: detect duplication of these options.
+			// (Is this necessary?)
+			newMount.Options = append(newMount.Options, kv[0])
+		case "rw", "readwrite":
+			newMount.Options = append(newMount.Options, "rw")
+		case "readonly", "ro":
+			// Alias for "ro"
+			newMount.Options = append(newMount.Options, "ro")
+			setReadOnly = true
+		case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
+			newMount.Options = append(newMount.Options, kv[0])
+			setShared = true
+		case "sharing":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			sharing = kv[1]
+		case "bind-propagation":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, kv[1])
+		case "id":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			id = kv[1]
+		case "from":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			fromStage = kv[1]
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+				return newMount, lockedTargets, err
+			}
+			newMount.Destination = kv[1]
+			setDest = true
+		case "src", "source":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Source = kv[1]
+		case "mode":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			mode, err = strconv.ParseUint(kv[1], 8, 32)
+			if err != nil {
+				return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode")
+			}
+		case "uid":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			uid, err = strconv.Atoi(kv[1])
+			if err != nil {
+				return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid")
+			}
+		case "gid":
+			if len(kv) == 1 {
+				return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			gid, err = strconv.Atoi(kv[1])
+			if err != nil {
+				return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid")
+			}
+		default:
+			return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0])
+		}
+	}
+
+	if !setDest {
+		return newMount, lockedTargets, errBadVolDest
+	}
+
+	if fromStage != "" {
+		// do not create the cache on the host;
+		// instead use the read-only mounted stage as the cache
+		mountPoint := ""
+		if additionalMountPoints != nil {
+			if val, ok := additionalMountPoints[fromStage]; ok {
+				if val.IsStage {
+					mountPoint = val.MountPoint
+				}
+			}
+		}
+		// Cache does not support using an image, so if no stage
+		// was found, return an error
+		if mountPoint == "" {
+			return newMount, lockedTargets, fmt.Errorf("no stage found with name %s", fromStage)
+		}
+		// the path should be /contextDir/specified path
+		newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
+	} else {
+		// we need to create the cache on the host if no stage is being used
+
+		// since the type is cache and the cache can be reused by consecutive builds,
+		// create a common cache directory, which persists on the host within the temp lifecycle;
+		// add a subdirectory if one was specified
+
+		// cache parent directory
+		cacheParent := filepath.Join(getTempDir(), BuildahCacheDir)
+		// create the cache on the host if not present
+		err = os.MkdirAll(cacheParent, os.FileMode(0755))
+		if err != nil {
+			return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory")
+		}
+
+		if id != "" {
+			newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
+		} else {
+			newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
+		}
+		idPair := idtools.IDPair{
+			UID: uid,
+			GID: gid,
+		}
+		// buildkit parity: change the uid and gid if specified, otherwise keep `0`
+		err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+		if err != nil {
+			return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
+		}
+	}
+
+	switch sharing {
+	case "locked":
+		// lock the parent cache
+		lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile))
+		if err != nil {
+			return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked")
+		}
+		// Will be unlocked after the RUN step is executed.
+		lockfile.Lock()
+		lockedTargets = append(lockedTargets, filepath.Join(newMount.Source, BuildahCacheLockfile))
+	case "shared":
+		// do nothing since the default is `shared`
+		break
+	default:
+		// error out for unknown values
+		return newMount, lockedTargets, errors.Errorf("unrecognized value %q for field `sharing`", sharing)
+	}
+
+	// buildkit parity: the default sharing mode should be shared
+	// unless specified otherwise
+	if !setShared {
+		newMount.Options = append(newMount.Options, "shared")
+	}
+
+	// buildkit parity: the cache must be writable unless `ro` or `readonly` is configured explicitly
+	if !setReadOnly {
+		newMount.Options = append(newMount.Options, "rw")
+	}
+
+	newMount.Options = append(newMount.Options, "bind")
+
+	opts, err := parse.ValidateVolumeOpts(newMount.Options)
+	if err != nil {
+		return newMount, lockedTargets, err
+	}
+	newMount.Options = opts
+
+	return newMount, lockedTargets, nil
+}
+
+// ValidateVolumeMountHostDir validates the host path of buildah --volume
+func ValidateVolumeMountHostDir(hostDir string) error {
+	if !filepath.IsAbs(hostDir) {
+		return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
+	}
+	if _, err := os.Stat(hostDir); err != nil {
+		return errors.WithStack(err)
+	}
+	return nil
+}
+
+// RevertEscapedColon converts "\:" to ":"
+func RevertEscapedColon(source string) string {
+	return strings.ReplaceAll(source, "\\:", ":")
+}
+
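The `sharing=locked` branch in `GetCacheMount` above serializes access to a cache directory with a `containers/storage` lockfile, and the lock is only released after the RUN step completes. A hedged sketch of that locking pattern (the cache path is illustrative):

```go
package main

import (
	"path/filepath"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	cacheDir := "/var/tmp/buildah-cache/go-build" // illustrative
	lock, err := lockfile.GetLockfile(filepath.Join(cacheDir, "buildah-cache-lockfile"))
	if err != nil {
		panic(err)
	}
	lock.Lock()         // held while the RUN step uses the cache
	defer lock.Unlock() // released once the step finishes
}
```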
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + result := make([]string, 0, 3) + sb := &strings.Builder{} + for idx, r := range str { + if r == ':' { + // the colon is backslash-escaped + if idx-1 > 0 && str[idx-1] == '\\' { + sb.WriteRune(r) + } else { + // os.Stat will fail if path contains escaped colon + result = append(result, RevertEscapedColon(sb.String())) + sb.Reset() + } + } else { + sb.WriteRune(r) + } + } + if sb.Len() > 0 { + result = append(result, RevertEscapedColon(sb.String())) + } + return result +} + +func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { + finalVolumeMounts := make(map[string]specs.Mount) + + for _, volume := range volumes { + volumeMount, err := Volume(volume) + if err != nil { + return nil, err + } + if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { + return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) + } + finalVolumeMounts[volumeMount.Destination] = volumeMount + } + return finalVolumeMounts, nil +} + +// Volume parses the input of --volume +func Volume(volume string) (specs.Mount, error) { + mount := specs.Mount{} + arr := SplitStringWithColonEscape(volume) + if len(arr) < 2 { + return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) + } + if err := ValidateVolumeMountHostDir(arr[0]); err != nil { + return mount, err + } + if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { + return mount, err + } + mountOptions := "" + if len(arr) > 2 { + mountOptions = arr[2] + if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { + return mount, err + } + } + mountOpts := strings.Split(mountOptions, ",") + mount.Source = arr[0] + mount.Destination = arr[1] + mount.Type = "rbind" + mount.Options = mountOpts + return mount, nil +} + +// GetVolumes gets the volumes from --volume and --mount +func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []string, error) { + unifiedMounts, mountedImages, lockedTargets, err := getMounts(ctx, store, mounts, contextDir) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + volumeMounts, err := getVolumeMounts(volumes) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, dest) + } + unifiedMounts[dest] = mount + } + + finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + finalMounts = append(finalMounts, mount) + } + return finalMounts, mountedImages, lockedTargets, nil +} + +// getMounts takes user-provided input from the --mount flag and creates OCI +// spec mounts. +// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=tmpfs,target=/dev/shm ... 
+// getMounts takes user-provided input from the --mount flag and creates OCI
+// spec mounts.
+// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
+// buildah run --mount type=tmpfs,target=/dev/shm ...
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []string, error) {
+	finalMounts := make(map[string]specs.Mount)
+	mountedImages := make([]string, 0)
+	lockedTargets := make([]string, 0)
+
+	errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs|cache>,[src=<host-dir>,]target=<ctr-dir>[,options]")
+
+	// TODO(vrothberg): the manual parsing can be replaced with a regular expression
+	//                  to allow a more robust parsing of the mount format and to give
+	//                  precise errors regarding supported format versus supported options.
+	for _, mount := range mounts {
+		arr := strings.SplitN(mount, ",", 2)
+		if len(arr) < 2 {
+			return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount)
+		}
+		kv := strings.Split(arr[0], "=")
+		// TODO: type is not explicitly required in Docker.
+		// If not specified, it defaults to "volume".
+		if len(kv) != 2 || kv[0] != "type" {
+			return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount)
+		}
+
+		tokens := strings.Split(arr[1], ",")
+		switch kv[1] {
+		case TypeBind:
+			mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil)
+			if err != nil {
+				return nil, mountedImages, lockedTargets, err
+			}
+			if _, ok := finalMounts[mount.Destination]; ok {
+				return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
+			}
+			finalMounts[mount.Destination] = mount
+			mountedImages = append(mountedImages, image)
+		case TypeCache:
+			mount, lockedPaths, err := GetCacheMount(tokens, store, "", nil)
+			lockedTargets = lockedPaths
+			if err != nil {
+				return nil, mountedImages, lockedTargets, err
+			}
+			if _, ok := finalMounts[mount.Destination]; ok {
+				return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
+			}
+			finalMounts[mount.Destination] = mount
+		case TypeTmpfs:
+			mount, err := GetTmpfsMount(tokens)
+			if err != nil {
+				return nil, mountedImages, lockedTargets, err
+			}
+			if _, ok := finalMounts[mount.Destination]; ok {
+				return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination)
+			}
+			finalMounts[mount.Destination] = mount
+		default:
+			return nil, mountedImages, lockedTargets, errors.Errorf("invalid filesystem type %q", kv[1])
+		}
+	}
+
+	return finalMounts, mountedImages, lockedTargets, nil
+}
+
+// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
+func GetTmpfsMount(args []string) (specs.Mount, error) {
+	newMount := specs.Mount{
+		Type:   TypeTmpfs,
+		Source: TypeTmpfs,
+	}
+
+	setDest := false
+
+	for _, val := range args {
+		kv := strings.SplitN(val, "=", 2)
+		switch kv[0] {
+		case "ro", "nosuid", "nodev", "noexec":
+			newMount.Options = append(newMount.Options, kv[0])
+		case "readonly":
+			// Alias for "ro"
+			newMount.Options = append(newMount.Options, "ro")
+		case "tmpcopyup":
+			// the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
+			newMount.Options = append(newMount.Options, kv[0])
+		case "tmpfs-mode":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
+		case "tmpfs-size":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
+		case "src", "source":
+			return newMount, errors.Errorf("source is not supported with tmpfs mounts")
+		case "target", "dst", "destination":
+			if len(kv) == 1 {
+				return newMount, errors.Wrapf(errBadOptionArg, kv[0])
+			}
+			if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
+				return newMount, err
+			}
+			newMount.Destination = kv[1]
+			setDest = true
+		default:
+			return newMount, errors.Wrapf(errBadMntOption, kv[0])
+		}
+	}
+
+	if !setDest {
+		return newMount, errBadVolDest
+	}
+
+	return newMount, nil
+}
+
+/* This is an internal function and could be changed at any time */
+/* for external usage please refer to buildah/pkg/parse.GetTempDir() */
+func getTempDir() string {
+	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
+		return tmpdir
+	}
+	return "/var/tmp"
+}
diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go
new file mode 100644
index 00000000000..8ddff99fb75
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/types.go
@@ -0,0 +1,11 @@
+package internal
+
+// Types in internal packages are subject to change between releases; avoid using them outside of buildah
+
+// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor.
+// StageExecutor has the ability to mount stages/images in the current context and
+// automatically clean them up.
+type StageMountDetails struct {
+	IsStage    bool   // tells if the mountpoint returned from the stage executor is a stage or an image
+	MountPoint string // mountpoint of the stage/image
+}
diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go
new file mode 100644
index 00000000000..691d89d65f7
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/util/util.go
@@ -0,0 +1,81 @@
+package util
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/buildah/define"
+	"github.com/containers/common/libimage"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/pkg/errors"
+)
+
+// LookupImage returns the *libimage.Image corresponding to the given image name or ID
+func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) {
+	systemContext := ctx
+	if systemContext == nil {
+		systemContext = &types.SystemContext{}
+	}
+	runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
+	if err != nil {
+		return nil, err
+	}
+	localImage, _, err := runtime.LookupImage(image, nil)
+	if err != nil {
+		return nil, err
+	}
+	return localImage, nil
+}
+
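The directory branch of `ExportFromReader` (whose body follows) untars the rootfs stream with `chrootarchive.Untar`, skipping `lchown` for rootless callers so extracted files remain accessible to the unprivileged user. A minimal sketch of that path, assuming the caller supplies the tar stream and a destination of its choosing:

```go
package exportsketch

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/unshare"
)

// exportDir writes a tar stream into dest, mirroring ExportFromReader's
// directory branch.
func exportDir(input io.Reader, dest string) error {
	if err := os.MkdirAll(dest, 0o700); err != nil {
		return err
	}
	return chrootarchive.Untar(input, dest, &archive.TarOptions{
		NoLchown: unshare.IsRootless(), // rootless: skip lchown on extracted files
	})
}
```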
+// ExportFromReader reads bytes from the given reader and exports them to an external tar, a directory, or stdout.
+func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error {
+	var err error
+	if !filepath.IsAbs(opts.Path) {
+		opts.Path, err = filepath.Abs(opts.Path)
+		if err != nil {
+			return err
+		}
+	}
+	if opts.IsDir {
+		// In order to keep this feature as close as possible to
+		// buildkit, it was decided to preserve ownership when
+		// invoked as root, since the caller already has access to
+		// the artifacts; for rootless users, however, ownership has
+		// to be changed so that exported artifacts remain accessible
+		// to unprivileged users.
+		// See: https://github.com/containers/buildah/pull/3823#discussion_r829376633
+		noLChown := false
+		if unshare.IsRootless() {
+			noLChown = true
+		}
+
+		err = os.MkdirAll(opts.Path, 0700)
+		if err != nil {
+			return errors.Wrapf(err, "failed while creating the destination path %q", opts.Path)
+		}
+
+		err = chrootarchive.Untar(input, opts.Path, &archive.TarOptions{NoLchown: noLChown})
+		if err != nil {
+			return errors.Wrapf(err, "failed while performing untar at %q", opts.Path)
+		}
+	} else {
+		outFile := os.Stdout
+		if !opts.IsStdout {
+			outFile, err = os.Create(opts.Path)
+			if err != nil {
+				return errors.Wrapf(err, "failed while creating destination tar at %q", opts.Path)
+			}
+			defer outFile.Close()
+		}
+		_, err = io.Copy(outFile, input)
+		if err != nil {
+			return errors.Wrapf(err, "failed while performing copy to %q", opts.Path)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 85a0f0b31b0..03b2de662d2 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -7,7 +7,6 @@ import (
 	"strings"
 
 	"github.com/containers/buildah/define"
-	"github.com/containers/buildah/pkg/blobcache"
 	"github.com/containers/common/libimage"
 	"github.com/containers/common/pkg/config"
 	"github.com/containers/image/v5/image"
@@ -16,6 +15,7 @@ import (
 	"github.com/containers/image/v5/transports"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/stringid"
 	digest "github.com/opencontainers/go-digest"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/openshift/imagebuilder"
@@ -49,6 +49,15 @@ func getImageName(name string, img *storage.Image) string {
 
 func imageNamePrefix(imageName string) string {
 	prefix := imageName
+	if d, err := digest.Parse(imageName); err == nil {
+		prefix = d.Encoded()
+		if len(prefix) > 12 {
+			prefix = prefix[:12]
+		}
+	}
+	if stringid.ValidateID(prefix) == nil {
+		prefix = stringid.TruncateID(prefix)
+	}
 	s := strings.Split(prefix, ":")
 	if len(s) > 0 {
 		prefix = s[0]
@@ -113,6 +122,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
 		options.FromImage = ""
 	}
 
+	if options.NetworkInterface == nil {
+		// create the network interface
+		// Note: It is important to do this before we pull any images/create containers.
+		// The default backend detection logic needs an empty store to correctly detect
+		// that we can use netavark; if the store is not empty it will use CNI so as not to break existing installs.
+ options.NetworkInterface, err = getNetworkInterface(store, options.CNIConfigDir, options.CNIPluginPath) + if err != nil { + return nil, err + } + } + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) if options.FromImage != "" && options.FromImage != "scratch" { @@ -134,14 +154,11 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions pullOptions.OciDecryptConfig = options.OciDecryptConfig pullOptions.SignaturePolicyPath = options.SignaturePolicyPath pullOptions.Writer = options.ReportWriter + pullOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) maxRetries := uint(options.MaxPullRetries) pullOptions.MaxRetries = &maxRetries - if options.BlobDirectory != "" { - pullOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) - } - pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions) if err != nil { return nil, err @@ -197,6 +214,9 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions } name := "working-container" + if options.ContainerSuffix != "" { + name = options.ContainerSuffix + } if options.Container != "" { name = options.Container } else { @@ -216,9 +236,20 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions conflict := 100 for { + + var flags map[string]interface{} + // check if we have predefined ProcessLabel and MountLabel + // this could be true if this is another stage in a build + if options.ProcessLabel != "" && options.MountLabel != "" { + flags = map[string]interface{}{ + "ProcessLabel": options.ProcessLabel, + "MountLabel": options.MountLabel, + } + } coptions := storage.ContainerOptions{ LabelOpts: options.CommonBuildOpts.LabelOpts, IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions), + Flags: flags, Volatile: true, } container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions) @@ -283,13 +314,15 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions UIDMap: uidmap, GIDMap: gidmap, }, - Capabilities: copyStringSlice(options.Capabilities), - CommonBuildOpts: options.CommonBuildOpts, - TopLayer: topLayer, - Args: options.Args, - Format: options.Format, - TempVolumes: map[string]bool{}, - Devices: options.Devices, + Capabilities: copyStringSlice(options.Capabilities), + CommonBuildOpts: options.CommonBuildOpts, + TopLayer: topLayer, + Args: copyStringStringMap(options.Args), + Format: options.Format, + TempVolumes: map[string]bool{}, + Devices: options.Devices, + Logger: options.Logger, + NetworkInterface: options.NetworkInterface, } if options.Mount { diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 8dadec13084..fa60619ac62 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -1,39 +1,8 @@ package blobcache import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/containers/buildah/docker" - "github.com/containers/common/libimage" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/compression" - "github.com/containers/image/v5/transports" + 
imageBlobCache "github.com/containers/image/v5/pkg/blobcache" "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" - digest "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - _ types.ImageReference = &blobCacheReference{} - _ types.ImageSource = &blobCacheSource{} - _ types.ImageDestination = &blobCacheDestination{} -) - -const ( - compressedNote = ".compressed" - decompressedNote = ".decompressed" ) // BlobCache is an object which saves copies of blobs that are written to it while passing them @@ -52,517 +21,11 @@ type BlobCache interface { ClearCache() error } -type blobCacheReference struct { - reference types.ImageReference - // WARNING: The contents of this directory may be accessed concurrently, - // both within this process and by multiple different processes - directory string - compress types.LayerCompression -} - -type blobCacheSource struct { - reference *blobCacheReference - source types.ImageSource - sys types.SystemContext - // this mutex synchronizes the counters below - mu sync.Mutex - cacheHits int64 - cacheMisses int64 - cacheErrors int64 -} - -type blobCacheDestination struct { - reference *blobCacheReference - destination types.ImageDestination -} - -func makeFilename(blobSum digest.Digest, isConfig bool) string { - if isConfig { - return blobSum.String() + ".config" - } - return blobSum.String() -} - -// CacheLookupReferenceFunc wraps a BlobCache into a -// libimage.LookupReferenceFunc to allow for using a BlobCache during -// image-copy operations. -func CacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { - // NOTE: this prevents us from moving BlobCache around and generalizes - // the libimage API. - return func(ref types.ImageReference) (types.ImageReference, error) { - ref, err := NewBlobCache(ref, directory, compress) - if err != nil { - return nil, errors.Wrapf(err, "error using blobcache %q", directory) - } - return ref, nil - } -} - // NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are // written to the destination image created from the resulting reference will also be stored -// as-is to the specified directory or a temporary directory. The cache directory's contents -// can be cleared by calling the returned BlobCache()'s ClearCache() method. +// as-is to the specified directory or a temporary directory. // The compress argument controls whether or not the cache will try to substitute a compressed // or different version of a blob when preparing the list of layers when reading an image. 
func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) { - if directory == "" { - return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) - } - switch compress { - case types.Compress, types.Decompress, types.PreserveOriginal: - // valid value, accept it - default: - return nil, errors.Errorf("unhandled LayerCompression value %v", compress) - } - return &blobCacheReference{ - reference: ref, - directory: directory, - compress: compress, - }, nil -} - -func (r *blobCacheReference) Transport() types.ImageTransport { - return r.reference.Transport() -} - -func (r *blobCacheReference) StringWithinTransport() string { - return r.reference.StringWithinTransport() -} - -func (r *blobCacheReference) DockerReference() reference.Named { - return r.reference.DockerReference() -} - -func (r *blobCacheReference) PolicyConfigurationIdentity() string { - return r.reference.PolicyConfigurationIdentity() -} - -func (r *blobCacheReference) PolicyConfigurationNamespaces() []string { - return r.reference.PolicyConfigurationNamespaces() -} - -func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return r.reference.DeleteImage(ctx, sys) -} - -func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { - if blobinfo.Digest == "" { - return false, -1, nil - } - - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig)) - fileInfo, err := os.Stat(filename) - if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { - return true, fileInfo.Size(), nil - } - if !os.IsNotExist(err) { - return false, -1, errors.Wrap(err, "checking size") - } - } - - return false, -1, nil -} - -func (r *blobCacheReference) Directory() string { - return r.directory -} - -func (r *blobCacheReference) ClearCache() error { - f, err := os.Open(r.directory) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - names, err := f.Readdirnames(-1) - if err != nil { - return errors.Wrapf(err, "error reading directory %q", r.directory) - } - for _, name := range names { - pathname := filepath.Join(r.directory, name) - if err = os.RemoveAll(pathname); err != nil { - return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(r)) - } - } - return nil -} - -func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference)) - } - return image.FromSource(ctx, sys, src) -} - -func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - src, err := r.reference.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference)) - } - logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress) - return &blobCacheSource{reference: r, source: src, sys: *sys}, nil -} - -func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - dest, err := r.reference.NewImageDestination(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image 
destination %q", transports.ImageName(r.reference)) - } - logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory) - return &blobCacheDestination{reference: r, destination: dest}, nil -} - -func (s *blobCacheSource) Reference() types.ImageReference { - return s.reference -} - -func (s *blobCacheSource) Close() error { - logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) - return s.source.Close() -} - -func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) - manifestBytes, err := ioutil.ReadFile(filename) - if err == nil { - s.cacheHits++ - return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil - } - if !os.IsNotExist(err) { - s.cacheErrors++ - return nil, "", errors.Wrap(err, "checking for manifest file") - } - } - s.cacheMisses++ - return s.source.GetManifest(ctx, instanceDigest) -} - -func (s *blobCacheSource) HasThreadSafeGetBlob() bool { - return s.source.HasThreadSafeGetBlob() -} - -func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - present, size, err := s.reference.HasBlob(blobinfo) - if err != nil { - return nil, -1, err - } - if present { - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - s.mu.Lock() - s.cacheHits++ - s.mu.Unlock() - return f, size, nil - } - if !os.IsNotExist(err) { - s.mu.Lock() - s.cacheErrors++ - s.mu.Unlock() - return nil, -1, errors.Wrap(err, "checking for cache") - } - } - } - s.mu.Lock() - s.cacheMisses++ - s.mu.Unlock() - rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) - if err != nil { - return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) - } - return rc, size, nil -} - -func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.source.GetSignatures(ctx, instanceDigest) -} - -func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - signatures, err := s.source.GetSignatures(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) - } - canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) - - infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - if infos == nil { - img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) - if err != nil { - return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - infos = img.LayerInfos() - } - - if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { - replacedInfos := make([]types.BlobInfo, 0, len(infos)) - for _, info := range infos { - var replaceDigest []byte - var err error - blobFile := 
filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) - alternate := "" - switch s.reference.compress { - case types.Compress: - alternate = blobFile + compressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - case types.Decompress: - alternate = blobFile + decompressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - } - if err == nil && digest.Digest(replaceDigest).Validate() == nil { - alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) - fileInfo, err := os.Stat(alternate) - if err == nil { - switch info.MediaType { - case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: - switch s.reference.compress { - case types.Compress: - info.MediaType = v1.MediaTypeImageLayerGzip - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - info.MediaType = v1.MediaTypeImageLayer - info.CompressionAlgorithm = nil - } - case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType: - switch s.reference.compress { - case types.Compress: - info.MediaType = manifest.DockerV2Schema2LayerMediaType - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - // nope, not going to suggest anything, it's not allowed by the spec - replacedInfos = append(replacedInfos, info) - continue - } - } - logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) - info.CompressionOperation = s.reference.compress - info.Digest = digest.Digest(replaceDigest) - info.Size = fileInfo.Size() - logrus.Debugf("info = %#v", info) - } - } - replacedInfos = append(replacedInfos, info) - } - infos = replacedInfos - } - - return infos, nil -} - -func (d *blobCacheDestination) Reference() types.ImageReference { - return d.reference -} - -func (d *blobCacheDestination) Close() error { - logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) - return d.destination.Close() -} - -func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { - return d.destination.SupportedManifestMIMETypes() -} - -func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { - return d.destination.SupportsSignatures(ctx) -} - -func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { - return d.destination.DesiredLayerCompression() -} - -func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { - return d.destination.AcceptsForeignLayerURLs() -} - -func (d *blobCacheDestination) MustMatchRuntimeOS() bool { - return d.destination.MustMatchRuntimeOS() -} - -func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { - return d.destination.IgnoresEmbeddedDockerReference() -} - -// Decompress and save the contents of the decompressReader stream into the passed-in temporary -// file. If we successfully save all of the data, rename the file to match the digest of the data, -// and make notes about the relationship between the file that holds a copy of the compressed data -// and this new file. -func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { - defer wg.Done() - // Decompress from and digest the reading end of that pipe. 
- decompressed, err3 := archive.DecompressStream(decompressReader) - digester := digest.Canonical.Digester() - if err3 == nil { - // Read the decompressed data through the filter over the pipe, blocking until the - // writing end is closed. - _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) - } else { - // Drain the pipe to keep from stalling the PutBlob() thread. - if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil { - logrus.Debugf("error draining the pipe: %v", err) - } - } - decompressReader.Close() - decompressed.Close() - tempFile.Close() - // Determine the name that we should give to the uncompressed copy of the blob. - decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) - if err3 == nil { - // Rename the temporary file. - if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { - logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } else { - *alternateDigest = digester.Digest() - // Note the relationship between the two files. - if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { - logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) - } - if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { - logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) - } - } - } else { - // Remove the temporary file. 
- if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } -} - -func (d *blobCacheDestination) HasThreadSafePutBlob() bool { - return d.destination.HasThreadSafePutBlob() -} - -func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - var tempfile *os.File - var err error - var n int - var alternateDigest digest.Digest - var closer io.Closer - wg := new(sync.WaitGroup) - needToWait := false - compression := archive.Uncompressed - if inputInfo.Digest != "" { - filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err == nil { - stream = io.TeeReader(stream, tempfile) - defer func() { - if err == nil { - if err = os.Rename(tempfile.Name(), filename); err != nil { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) - } - } else { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - } - tempfile.Close() - }() - } else { - logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) - } - if !isConfig { - initial := make([]byte, 8) - n, err = stream.Read(initial) - if n > 0 { - // Build a Reader that will still return the bytes that we just - // read, for PutBlob()'s sake. - stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) - if n >= len(initial) { - compression = archive.DetectCompression(initial[:n]) - } - if compression == archive.Gzip { - // The stream is compressed, so create a file which we'll - // use to store a decompressed copy. - decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err2 != nil { - logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) - decompressedTemp.Close() - } else { - // Write a copy of the compressed data to a pipe, - // closing the writing end of the pipe after - // PutBlob() returns. - decompressReader, decompressWriter := io.Pipe() - closer = decompressWriter - stream = io.TeeReader(stream, decompressWriter) - // Let saveStream() close the reading end and handle the temporary file. 
- wg.Add(1) - needToWait = true - go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) - } - } - } - } - newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) - if closer != nil { - closer.Close() - } - if needToWait { - wg.Wait() - } - if err != nil { - return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) - } - if alternateDigest.Validate() == nil { - logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) - } else { - logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) - } - return newBlobInfo, nil -} - -func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) - if err != nil || present { - return present, reusedInfo, err - } - - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - defer f.Close() - uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) - if err != nil { - return false, types.BlobInfo{}, err - } - return true, uploadedInfo, nil - } - } - - return false, types.BlobInfo{}, nil -} - -func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { - manifestDigest, err := manifest.Digest(manifestBytes) - if err != nil { - logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) - } else { - filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) - if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { - logrus.Warnf("error saving manifest as %q: %v", filename, err) - } - } - return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) -} - -func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - return d.destination.PutSignatures(ctx, signatures, instanceDigest) -} - -func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return d.destination.Commit(ctx, unparsedToplevel) + return imageBlobCache.NewBlobCache(ref, directory, compress) }
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 8ee4ab6d1f6..c325bc5cfb3 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -18,6 +18,40 @@ import ( "golang.org/x/sys/unix" ) +// Options holds various configuration options for an overlay mount. +// MountWithOptions accepts this type so that it is easier to specify +// a more verbose configuration for the overlay mount. +type Options struct { + // The Upper directory is normally the writable layer in an overlay mount. + // Note: this API does not handle escaping or validate the correctness of the values + // passed in UpperDirOptionFragment; instead, the values are passed as-is + // to the `mount` command. It is the user's responsibility to pre-validate + // these values. Invalid inputs may lead to undefined behaviour. + // This is provided as-is; use it if it works for you, but we can and will change or break it in the future. + // See this discussion for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 + // TODO: Should we address the above comment and handle escaping of metacharacters like + // `comma`, `backslash`, `colon` and any other special characters? + UpperDirOptionFragment string + // The Workdir is used to prepare files as they are switched between the layers. + // Note: this API does not handle escaping or validate the correctness of the values + // passed in WorkDirOptionFragment; instead, the values are passed as-is + // to the `mount` command. It is the user's responsibility to pre-validate + // these values. Invalid inputs may lead to undefined behaviour. + // This is provided as-is; use it if it works for you, but we can and will change or break it in the future. + // See this discussion for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 + // TODO: Should we address the above comment and handle escaping of metacharacters like + // `comma`, `backslash`, `colon` and any other special characters? + WorkDirOptionFragment string + // GraphOpts are graph options relayed from Podman; they are responsible for choosing the mount program + GraphOpts []string + // ReadOnly marks whether the overlay is mounted read-only + ReadOnly bool + // RootUID is not used yet but keeping it here for legacy reasons. + RootUID int + // RootGID is not used yet but keeping it here for legacy reasons. + RootGID int +} + // TempDir generates an overlay Temp directory in the container content func TempDir(containerDir string, rootUID, rootGID int) (string, error) { contentDir := filepath.Join(containerDir, "overlay") @@ -65,7 +99,8 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string // from the source system. It then mounts up the source directory on to the // generated mount point and returns the mount point to the caller. func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) { - return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, false) + overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID} + return MountWithOptions(contentDir, source, dest, &overlayOpts) } // MountReadOnly creates a subdir of the contentDir based on the source directory @@ -73,26 +108,71 @@ func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions [ // generated mount point and returns the mount point to the caller. Note that no // upper layer will be created rendering it a read-only mount func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) { - return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, true) + overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID} + return MountWithOptions(contentDir, source, dest, &overlayOpts) } -// NOTE: rootUID and rootUID are not yet used. -func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []string, readOnly bool) (mount specs.Mount, Err error) { +// findMountProgram finds if any mount program is specified in the graph options.
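// Review sketch (annotation, not part of the diff): callers of the former mountHelper now go
// through MountWithOptions (below) with an Options value; the graph options and paths here
// are illustrative, and contentDir is expected to come from overlay.TempDir:
//
//	opts := overlay.Options{
//		GraphOpts: []string{"overlay.mount_program=/usr/bin/fuse-overlayfs"},
//		ReadOnly:  true,
//	}
//	mnt, err := overlay.MountWithOptions(contentDir, "/host/src", "/ctr/dest", &opts)
//	if err != nil {
//		return err
//	}
//	// mnt is a specs.Mount: a "bind" of contentDir/merge onto /ctr/dest when a mount
//	// program is configured, or a native overlay mount otherwise.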
+func findMountProgram(graphOptions []string) string { + mountMap := map[string]bool{ + ".mount_program": true, + "overlay.mount_program": true, + "overlay2.mount_program": true, + } + + for _, i := range graphOptions { + s := strings.SplitN(i, "=", 2) + if len(s) != 2 { + continue + } + key := s[0] + val := s[1] + if mountMap[key] { + return val + } + } + + return "" +} + +// mountWithMountProgram mounts an overlay at mergeDir using the specified mount program +// and overlay options. +func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error { + cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir) + + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "exec %s", mountProgram) + } + return nil +} + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +// It additionally allows the caller to set a custom workdir, upperdir, and other overlay options. +// This API is currently used by Podman. +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { mergeDir := filepath.Join(contentDir, "merge") // Create overlay mount options for rw/ro. var overlayOptions string - if readOnly { + if opts.ReadOnly { // Read-only overlay mounts require two lower layer. lowerTwo := filepath.Join(contentDir, "lower") if err := os.Mkdir(lowerTwo, 0755); err != nil { return mount, err } - overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", source, lowerTwo) + overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) } else { // Read-write overlay mounts want a lower, upper and a work layer.
workDir := filepath.Join(contentDir, "work") upperDir := filepath.Join(contentDir, "upper") + + if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { + workDir = opts.WorkDirOptionFragment + upperDir = opts.UpperDirOptionFragment + } + st, err := os.Stat(source) if err != nil { return mount, err @@ -105,45 +185,24 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin return mount, err } } - - overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir) + overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) } - if unshare.IsRootless() { - mountProgram := "" - - mountMap := map[string]bool{ - ".mount_program": true, - "overlay.mount_program": true, - "overlay2.mount_program": true, - } - - for _, i := range graphOptions { - s := strings.SplitN(i, "=", 2) - if len(s) != 2 { - continue - } - key := s[0] - val := s[1] - if mountMap[key] { - mountProgram = val - break - } + mountProgram := findMountProgram(opts.GraphOpts) + if mountProgram != "" { + if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { + return mount, err } - if mountProgram != "" { - cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir) - if err := cmd.Run(); err != nil { - return mount, errors.Wrapf(err, "exec %s", mountProgram) - } + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "bind" + mount.Options = []string{"bind", "slave"} + return mount, nil + } - mount.Source = mergeDir - mount.Destination = dest - mount.Type = "bind" - mount.Options = []string{"bind", "slave"} - return mount, nil - } - /* If a mount_program is not specified, fallback to try mount native overlay. */ + if unshare.IsRootless() { + /* If a mount_program is not specified, fallback to try mounting native overlay. 
*/ overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) } @@ -155,6 +214,11 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin return mount, nil } +// escapeColon converts ":" to "\:"; a path that will be overlay-mounted needs its colons escaped +func escapeColon(source string) string { + return strings.ReplaceAll(source, ":", "\\:") +} + // RemoveTemp removes temporary mountpoint and all content from its parent // directory func RemoveTemp(contentDir string) error {
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 685d63d31f9..07986384518 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -9,12 +9,13 @@ import ( "net" "os" "path/filepath" - "runtime" "strconv" "strings" "unicode" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" + internalParse "github.com/containers/buildah/internal/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/common/pkg/parse" "github.com/containers/image/v5/types" @@ -22,9 +23,11 @@ import ( "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/openshift/imagebuilder" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/spf13/pflag" "golang.org/x/term" ) @@ -37,17 +40,20 @@ const ( TypeBind = "bind" // TypeTmpfs is the type for mounting tmpfs TypeTmpfs = "tmpfs" -) - -var ( - errBadMntOption = errors.Errorf("invalid mount option") - errDuplicateDest = errors.Errorf("duplicate mount destination") - optionArgError = errors.Errorf("must provide an argument for option") - noDestError = errors.Errorf("must set volume destination") + // TypeCache is the type for mounting a common persistent cache from the host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on the host so it's available for all consecutive builds.
+ // Lifecycle of following directory will be inherited from how host machine treats temporary directory + BuildahCacheDir = "buildah-cache" ) // CommonBuildOptions parses the build options from the bud cli func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { + return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// CommonBuildOptionsFromFlagSet parses the build options from the bud cli +func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) { var ( memoryLimit int64 memorySwap int64 @@ -55,7 +61,7 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { err error ) - memVal, _ := c.Flags().GetString("memory") + memVal, _ := flags.GetString("memory") if memVal != "" { memoryLimit, err = units.RAMInBytes(memVal) if err != nil { @@ -63,16 +69,25 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { } } - memSwapValue, _ := c.Flags().GetString("memory-swap") + memSwapValue, _ := flags.GetString("memory-swap") if memSwapValue != "" { - memorySwap, err = units.RAMInBytes(memSwapValue) - if err != nil { - return nil, errors.Wrapf(err, "invalid value for memory-swap") + if memSwapValue == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(memSwapValue) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory-swap") + } } } - addHost, _ := c.Flags().GetStringSlice("add-host") + noHosts, _ := flags.GetBool("no-hosts") + + addHost, _ := flags.GetStringSlice("add-host") if len(addHost) > 0 { + if noHosts { + return nil, errors.Errorf("--no-hosts and --add-host conflict, can not be used together") + } for _, host := range addHost { if err := validateExtraHost(host); err != nil { return nil, errors.Wrapf(err, "invalid value for add-host") @@ -82,8 +97,8 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { noDNS = false dnsServers := []string{} - if c.Flag("dns").Changed { - dnsServers, _ = c.Flags().GetStringSlice("dns") + if flags.Changed("dns") { + dnsServers, _ = flags.GetStringSlice("dns") for _, server := range dnsServers { if strings.ToLower(server) == "none" { noDNS = true @@ -95,62 +110,65 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { } dnsSearch := []string{} - if c.Flag("dns-search").Changed { - dnsSearch, _ = c.Flags().GetStringSlice("dns-search") + if flags.Changed("dns-search") { + dnsSearch, _ = flags.GetStringSlice("dns-search") if noDNS && len(dnsSearch) > 0 { return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none") } } dnsOptions := []string{} - if c.Flag("dns-option").Changed { - dnsOptions, _ = c.Flags().GetStringSlice("dns-option") + if flags.Changed("dns-option") { + dnsOptions, _ = flags.GetStringSlice("dns-option") if noDNS && len(dnsOptions) > 0 { return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none") } } - if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil { + if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil { return nil, errors.Wrapf(err, "invalid --shm-size") } - volumes, _ := c.Flags().GetStringArray("volume") + volumes, _ := flags.GetStringArray("volume") if err := Volumes(volumes); err != nil { return nil, err } - cpuPeriod, _ := c.Flags().GetUint64("cpu-period") - cpuQuota, _ := c.Flags().GetInt64("cpu-quota") - cpuShares, _ := 
c.Flags().GetUint64("cpu-shares") - httpProxy, _ := c.Flags().GetBool("http-proxy") + cpuPeriod, _ := flags.GetUint64("cpu-period") + cpuQuota, _ := flags.GetInt64("cpu-quota") + cpuShares, _ := flags.GetUint64("cpu-shares") + httpProxy, _ := flags.GetBool("http-proxy") + identityLabel, _ := flags.GetBool("identity-label") ulimit := []string{} - if c.Flag("ulimit").Changed { - ulimit, _ = c.Flags().GetStringSlice("ulimit") + if flags.Changed("ulimit") { + ulimit, _ = flags.GetStringSlice("ulimit") } - secrets, _ := c.Flags().GetStringArray("secret") - sshsources, _ := c.Flags().GetStringArray("ssh") + secrets, _ := flags.GetStringArray("secret") + sshsources, _ := flags.GetStringArray("ssh") commonOpts := &define.CommonBuildOptions{ - AddHost: addHost, - CPUPeriod: cpuPeriod, - CPUQuota: cpuQuota, - CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(), - CPUSetMems: c.Flag("cpuset-mems").Value.String(), - CPUShares: cpuShares, - CgroupParent: c.Flag("cgroup-parent").Value.String(), - DNSOptions: dnsOptions, - DNSSearch: dnsSearch, - DNSServers: dnsServers, - HTTPProxy: httpProxy, - Memory: memoryLimit, - MemorySwap: memorySwap, - ShmSize: c.Flag("shm-size").Value.String(), - Ulimit: ulimit, - Volumes: volumes, - Secrets: secrets, - SSHSources: sshsources, - } - securityOpts, _ := c.Flags().GetStringArray("security-opt") + AddHost: addHost, + CPUPeriod: cpuPeriod, + CPUQuota: cpuQuota, + CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(), + CPUSetMems: findFlagFunc("cpuset-mems").Value.String(), + CPUShares: cpuShares, + CgroupParent: findFlagFunc("cgroup-parent").Value.String(), + DNSOptions: dnsOptions, + DNSSearch: dnsSearch, + DNSServers: dnsServers, + HTTPProxy: httpProxy, + IdentityLabel: types.NewOptionalBool(identityLabel), + Memory: memoryLimit, + MemorySwap: memorySwap, + NoHosts: noHosts, + ShmSize: findFlagFunc("shm-size").Value.String(), + Ulimit: ulimit, + Volumes: volumes, + Secrets: secrets, + SSHSources: sshsources, + } + securityOpts, _ := flags.GetStringArray("security-opt") if err := parseSecurityOpts(securityOpts, commonOpts); err != nil { return nil, err } @@ -164,7 +182,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti } con := strings.SplitN(opt, "=", 2) if len(con) != 2 { - return errors.Errorf("Invalid --security-opt name=value pair: %q", opt) + return errors.Errorf("invalid --security-opt name=value pair: %q", opt) } switch con[0] { @@ -175,7 +193,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti case "seccomp": commonOpts.SeccompProfilePath = con[1] default: - return errors.Errorf("Invalid --security-opt 2: %q", opt) + return errors.Errorf("invalid --security-opt 2: %q", opt) } } @@ -199,32 +217,14 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti return nil } +// Split string into slice by colon. Backslash-escaped colon (i.e. 
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + return internalParse.SplitStringWithColonEscape(str) +} + // Volume parses the input of --volume func Volume(volume string) (specs.Mount, error) { - mount := specs.Mount{} - arr := strings.SplitN(volume, ":", 3) - if len(arr) < 2 { - return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) - } - if err := validateVolumeMountHostDir(arr[0]); err != nil { - return mount, err - } - if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { - return mount, err - } - mountOptions := "" - if len(arr) > 2 { - mountOptions = arr[2] - if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { - return mount, err - } - } - mountOpts := strings.Split(mountOptions, ",") - mount.Source = arr[0] - mount.Destination = arr[1] - mount.Type = "rbind" - mount.Options = mountOpts - return mount, nil + return internalParse.Volume(volume) } // Volumes validates the host and container paths passed in to the --volume flag @@ -240,236 +240,11 @@ func Volumes(volumes []string) error { return nil } -func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { - finalVolumeMounts := make(map[string]specs.Mount) - - for _, volume := range volumes { - volumeMount, err := Volume(volume) - if err != nil { - return nil, err - } - if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) - } - finalVolumeMounts[volumeMount.Destination] = volumeMount - } - return finalVolumeMounts, nil -} - -// GetVolumes gets the volumes from --volume and --mount -func GetVolumes(volumes []string, mounts []string) ([]specs.Mount, error) { - unifiedMounts, err := getMounts(mounts) - if err != nil { - return nil, err - } - volumeMounts, err := getVolumeMounts(volumes) - if err != nil { - return nil, err - } - for dest, mount := range volumeMounts { - if _, ok := unifiedMounts[dest]; ok { - return nil, errors.Wrapf(errDuplicateDest, dest) - } - unifiedMounts[dest] = mount - } - - finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) - for _, mount := range unifiedMounts { - finalMounts = append(finalMounts, mount) - } - return finalMounts, nil -} - -// getMounts takes user-provided input from the --mount flag and creates OCI -// spec mounts. -// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... -// buildah run --mount type=tmpfs,target=/dev/shm ... -func getMounts(mounts []string) (map[string]specs.Mount, error) { - finalMounts := make(map[string]specs.Mount) - - errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=,[src=,]target=[,options]") - - // TODO(vrothberg): the manual parsing can be replaced with a regular expression - // to allow a more robust parsing of the mount format and to give - // precise errors regarding supported format versus supported options. - for _, mount := range mounts { - arr := strings.SplitN(mount, ",", 2) - if len(arr) < 2 { - return nil, errors.Wrapf(errInvalidSyntax, "%q", mount) - } - kv := strings.Split(arr[0], "=") - // TODO: type is not explicitly required in Docker. - // If not specified, it defaults to "volume". 
- if len(kv) != 2 || kv[0] != "type" { - return nil, errors.Wrapf(errInvalidSyntax, "%q", mount) - } - - tokens := strings.Split(arr[1], ",") - switch kv[1] { - case TypeBind: - mount, err := GetBindMount(tokens) - if err != nil { - return nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, mount.Destination) - } - finalMounts[mount.Destination] = mount - case TypeTmpfs: - mount, err := GetTmpfsMount(tokens) - if err != nil { - return nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, mount.Destination) - } - finalMounts[mount.Destination] = mount - default: - return nil, errors.Errorf("invalid filesystem type %q", kv[1]) - } - } - - return finalMounts, nil -} - -// GetBindMount parses a single bind mount entry from the --mount flag. -func GetBindMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeBind, - } - - setSource := false - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "bind-nonrecursive": - newMount.Options = append(newMount.Options, "bind") - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) - case "bind-propagation": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, kv[1]) - case "src", "source": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeHostDir(kv[1]); err != nil { - return newMount, err - } - newMount.Source = kv[1] - setSource = true - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - case "consistency": - // Option for OS X only, has no meaning on other platforms - // and can thus be safely ignored. 
- // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts - default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) - } - } - - if !setDest { - return newMount, noDestError - } - - if !setSource { - newMount.Source = newMount.Destination - } - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, err - } - newMount.Options = opts - - return newMount, nil -} - -// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag -func GetTmpfsMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeTmpfs, - Source: TypeTmpfs, - } - - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "tmpfs-mode": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) - case "tmpfs-size": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) - case "src", "source": - return newMount, errors.Errorf("source is not supported with tmpfs mounts") - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) - } - } - - if !setDest { - return newMount, noDestError - } - - return newMount, nil -} - // ValidateVolumeHostDir validates a volume mount's source directory func ValidateVolumeHostDir(hostDir string) error { return parse.ValidateVolumeHostDir(hostDir) } -// validates the host path of buildah --volume -func validateVolumeMountHostDir(hostDir string) error { - if !filepath.IsAbs(hostDir) { - return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) - } - if _, err := os.Stat(hostDir); err != nil { - return errors.WithStack(err) - } - return nil -} - // ValidateVolumeCtrDir validates a volume mount's destination directory. func ValidateVolumeCtrDir(ctrDir string) error { return parse.ValidateVolumeCtrDir(ctrDir) @@ -508,20 +283,26 @@ func validateIPAddress(val string) (string, error) { // SystemContextFromOptions returns a SystemContext populated with values // per the input parameters provided by the caller for the use in authentication. func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { - certDir, err := c.Flags().GetString("cert-dir") + return SystemContextFromFlagSet(c.Flags(), c.Flag) +} + +// SystemContextFromFlagSet returns a SystemContext populated with values +// per the input parameters provided by the caller for the use in authentication. 
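// Review sketch (annotation, not part of the diff): the new *FromFlagSet variants let callers
// that do not hold a *cobra.Command (podman's CLI plumbing, for example) reuse buildah's
// option parsing. pflag.FlagSet.Lookup already matches the findFlagFunc signature, so a
// plausible call looks like:
//
//	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
//	fs.String("cert-dir", "", "")
//	fs.Bool("tls-verify", true, "")
//	_ = fs.Parse([]string{"--tls-verify=false"})
//	sc, err := parse.SystemContextFromFlagSet(fs, fs.Lookup)
//	// sc.DockerInsecureSkipTLSVerify is now types.OptionalBoolTrue; flags that are
//	// not registered on fs are simply skipped by the err == nil guards below.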
+func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) { + certDir, err := flags.GetString("cert-dir") if err != nil { certDir = "" } ctx := &types.SystemContext{ DockerCertPath: certDir, } - tlsVerify, err := c.Flags().GetBool("tls-verify") - if err == nil && c.Flag("tls-verify").Changed { + tlsVerify, err := flags.GetBool("tls-verify") + if err == nil && findFlagFunc("tls-verify").Changed { ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify) ctx.OCIInsecureSkipTLSVerify = !tlsVerify ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify } - disableCompression, err := c.Flags().GetBool("disable-compression") + disableCompression, err := flags.GetBool("disable-compression") if err == nil { if disableCompression { ctx.OCIAcceptUncompressedLayers = true @@ -529,59 +310,59 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { ctx.DirForceCompress = true } } - creds, err := c.Flags().GetString("creds") - if err == nil && c.Flag("creds").Changed { + creds, err := flags.GetString("creds") + if err == nil && findFlagFunc("creds").Changed { var err error ctx.DockerAuthConfig, err = AuthConfig(creds) if err != nil { return nil, err } } - sigPolicy, err := c.Flags().GetString("signature-policy") - if err == nil && c.Flag("signature-policy").Changed { + sigPolicy, err := flags.GetString("signature-policy") + if err == nil && findFlagFunc("signature-policy").Changed { ctx.SignaturePolicyPath = sigPolicy } - authfile, err := c.Flags().GetString("authfile") + authfile, err := flags.GetString("authfile") if err == nil { ctx.AuthFilePath = getAuthFile(authfile) } - regConf, err := c.Flags().GetString("registries-conf") - if err == nil && c.Flag("registries-conf").Changed { + regConf, err := flags.GetString("registries-conf") + if err == nil && findFlagFunc("registries-conf").Changed { ctx.SystemRegistriesConfPath = regConf } - regConfDir, err := c.Flags().GetString("registries-conf-dir") - if err == nil && c.Flag("registries-conf-dir").Changed { + regConfDir, err := flags.GetString("registries-conf-dir") + if err == nil && findFlagFunc("registries-conf-dir").Changed { ctx.RegistriesDirPath = regConfDir } - shortNameAliasConf, err := c.Flags().GetString("short-name-alias-conf") - if err == nil && c.Flag("short-name-alias-conf").Changed { + shortNameAliasConf, err := flags.GetString("short-name-alias-conf") + if err == nil && findFlagFunc("short-name-alias-conf").Changed { ctx.UserShortNameAliasConfPath = shortNameAliasConf } ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version) - if c.Flag("os") != nil && c.Flag("os").Changed { + if findFlagFunc("os") != nil && findFlagFunc("os").Changed { var os string - if os, err = c.Flags().GetString("os"); err != nil { + if os, err = flags.GetString("os"); err != nil { return nil, err } ctx.OSChoice = os } - if c.Flag("arch") != nil && c.Flag("arch").Changed { + if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed { var arch string - if arch, err = c.Flags().GetString("arch"); err != nil { + if arch, err = flags.GetString("arch"); err != nil { return nil, err } ctx.ArchitectureChoice = arch } - if c.Flag("variant") != nil && c.Flag("variant").Changed { + if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed { var variant string - if variant, err = c.Flags().GetString("variant"); err != nil { + if variant, err = flags.GetString("variant"); err != nil { return nil, err } ctx.VariantChoice = variant } - if 
c.Flag("platform") != nil && c.Flag("platform").Changed { + if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed { var specs []string - if specs, err = c.Flags().GetStringSlice("platform"); err != nil { + if specs, err = flags.GetStringSlice("platform"); err != nil { return nil, err } if len(specs) == 0 || specs[0] == "" { @@ -669,7 +450,7 @@ const platformSep = "/" // DefaultPlatform returns the standard platform for the current system func DefaultPlatform() string { - return runtime.GOOS + platformSep + runtime.GOARCH + return platforms.DefaultString() } // Platform separates the platform string into os, arch and variant, @@ -729,10 +510,82 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) { }, nil } +// GetBuildOutput is responsible for parsing custom build output argument i.e `build --output` flag. +// Takes `buildOutput` as string and returns BuildOutputOption +func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { + if len(buildOutput) == 1 && buildOutput == "-" { + // Feature parity with buildkit, output tar to stdout + // Read more here: https://docs.docker.com/engine/reference/commandline/build/#custom-build-outputs + return define.BuildOutputOption{Path: "", + IsDir: false, + IsStdout: true}, nil + } + if !strings.Contains(buildOutput, ",") { + // expect default --output + return define.BuildOutputOption{Path: buildOutput, + IsDir: true, + IsStdout: false}, nil + } + isDir := true + isStdout := false + typeSelected := false + pathSelected := false + path := "" + tokens := strings.Split(buildOutput, ",") + for _, option := range tokens { + arr := strings.SplitN(option, "=", 2) + if len(arr) != 2 { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput) + } + switch arr[0] { + case "type": + if typeSelected { + return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0]) + } + typeSelected = true + if arr[1] == "local" { + isDir = true + } else if arr[1] == "tar" { + isDir = false + } else { + return define.BuildOutputOption{}, fmt.Errorf("invalid type %q selected for build output options %q", arr[1], buildOutput) + } + case "dest": + if pathSelected { + return define.BuildOutputOption{}, fmt.Errorf("Duplicate %q not supported", arr[0]) + } + pathSelected = true + path = arr[1] + default: + return define.BuildOutputOption{}, fmt.Errorf("Unrecognized key %q in build output option: %q", arr[0], buildOutput) + } + } + + if !typeSelected || !pathSelected { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, accepted keys are type and dest must be present", buildOutput) + } + + if path == "-" { + if isDir { + return define.BuildOutputOption{}, fmt.Errorf("invalid build output option %q, type=local and dest=- is not supported", buildOutput) + } + return define.BuildOutputOption{Path: "", + IsDir: false, + IsStdout: true}, nil + } + + return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil +} + // IDMappingOptions parses the build options related to user namespaces and ID mapping. 
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { - user := c.Flag("userns-uid-map-user").Value.String() - group := c.Flag("userns-gid-map-group").Value.String() + return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) +} + +// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping. +func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { + user := findFlagFunc("userns-uid-map-user").Value.String() + group := findFlagFunc("userns-gid-map-group").Value.String() // If only the user or group was specified, use the same value for the // other, since we need both in order to initialize the maps using the // names. @@ -751,7 +604,7 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } mappings = submappings } - globalOptions := c.PersistentFlags() + globalOptions := persistentFlags // We'll parse the UID and GID mapping options the same way. buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) { outmap := make([]specs.LinuxIDMapping, 0, len(basemap)) @@ -769,8 +622,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed { spec, _ = globalOptions.GetStringSlice(option) } - if c.Flag(option).Changed { - spec, _ = c.Flags().GetStringSlice(option) + if findFlagFunc(option).Changed { + spec, _ = flags.GetStringSlice(option) } idmap, err := parseIDMap(spec) if err != nil { @@ -811,8 +664,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } // If the user specifically requested that we either use or don't use // user namespaces, override that default. - if c.Flag("userns").Changed { - how := c.Flag("userns").Value.String() + if findFlagFunc("userns").Changed { + how := findFlagFunc("userns").Value.String() switch how { case "", "container", "private": usernsOption.Host = false @@ -829,13 +682,6 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } usernsOptions = define.NamespaceOptions{usernsOption} - usernetwork := c.Flags().Lookup("network") - if usernetwork != nil && !usernetwork.Changed { - usernsOptions = append(usernsOptions, define.NamespaceOption{ - Name: string(specs.NetworkNamespace), - Host: usernsOption.Host, - }) - } // If the user requested that we use the host namespace, but also that // we use mappings, that's not going to work. if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { @@ -877,23 +723,30 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) { // NamespaceOptions parses the build options for all namespaces except for user namespace. func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { + return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// NamespaceOptionsFromFlagSet parses the build options for all namespaces except for user namespace. 
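// Review note (annotation, not part of the diff): the map specs consumed by parseIDMap in
// IDMappingOptionsFromFlagSet above are container-ID:host-ID:length triples; e.g.
// --userns-uid-map 0:100000:65536 maps container UIDs 0-65535 onto host UIDs
// 100000-165535 and yields
//
//	specs.LinuxIDMapping{ContainerID: 0, HostID: 100000, Size: 65536}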
+func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { options := make(define.NamespaceOptions, 0, 7) policy := define.NetworkDefault - for _, what := range []string{string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { - if c.Flags().Lookup(what) != nil && c.Flag(what).Changed { - how := c.Flag(what).Value.String() + for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { + if flags.Lookup(what) != nil && findFlagFunc(what).Changed { + how := findFlagFunc(what).Value.String() switch what { - case "network": - what = string(specs.NetworkNamespace) + case "cgroupns": + what = string(specs.CgroupNamespace) } switch how { case "", "container", "private": logrus.Debugf("setting %q namespace to %q", what, "") + policy = define.NetworkEnabled options.AddOrReplace(define.NamespaceOption{ Name: what, }) case "host": logrus.Debugf("setting %q namespace to host", what) + policy = define.NetworkEnabled options.AddOrReplace(define.NamespaceOption{ Name: what, Host: true, @@ -910,8 +763,11 @@ func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOption } } how = strings.TrimPrefix(how, "ns:") - if _, err := os.Stat(how); err != nil { - return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go + if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) { + if _, err := os.Stat(how); err != nil { + return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + } } policy = define.NetworkEnabled logrus.Debugf("setting %q namespace to %q", what, how) @@ -1034,35 +890,60 @@ func GetTempDir() string { } // Secrets parses the --secret flag -func Secrets(secrets []string) (map[string]string, error) { - parsed := make(map[string]string) - invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar") +func Secrets(secrets []string) (map[string]define.Secret, error) { + invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") + parsed := make(map[string]define.Secret) for _, secret := range secrets { - split := strings.Split(secret, ",") - if len(split) > 2 { + tokens := strings.Split(secret, ",") + var id, src, typ string + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "id": + id = kv[1] + case "src": + src = kv[1] + case "env": + src = kv[1] + typ = "env" + case "type": + if kv[1] != "file" && kv[1] != "env" { + return nil, errors.New("invalid secret type, must be file or env") + } + typ = kv[1] + } + } + if id == "" { return nil, invalidSyntax } - if len(split) == 2 { - id := strings.Split(split[0], "=") - src := strings.Split(split[1], "=") - if len(split) == 2 && strings.ToLower(id[0]) == "id" && strings.ToLower(src[0]) == "src" { - fullPath, err := filepath.Abs(src[1]) - if err != nil { - return nil, err - } - _, err = os.Stat(fullPath) - if err == nil { - parsed[id[1]] = fullPath - } - if err != nil { - return nil, errors.Wrap(err, "could not parse secrets") - } + if src == "" { + src = id + } + if typ == "" { + if _, ok := os.LookupEnv(id); ok { + typ = "env" } else { - return nil, invalidSyntax + typ = "file" } - } 
else { - return nil, invalidSyntax } + + if typ == "file" { + fullPath, err := filepath.Abs(src) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + _, err = os.Stat(fullPath) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + src = fullPath + } + newSecret := define.Secret{ + Source: src, + SourceType: typ, + } + parsed[id] = newSecret + } return parsed, nil } @@ -1085,3 +966,20 @@ func SSH(sshSources []string) (map[string]*sshagent.Source, error) { } return parsed, nil } + +func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) { + if path != "" { + excludes, err := imagebuilder.ParseIgnore(path) + return excludes, path, err + } + path = filepath.Join(contextDir, ".containerignore") + excludes, err := imagebuilder.ParseIgnore(path) + if os.IsNotExist(err) { + path = filepath.Join(contextDir, ".dockerignore") + excludes, err = imagebuilder.ParseIgnore(path) + } + if os.IsNotExist(err) { + return excludes, "", nil + } + return excludes, path, err +} diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index d02ea2a4d8d..b985cd2b01a 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -111,8 +111,9 @@ func (a *AgentServer) Serve(processLabel string) (string, error) { a.wg.Done() }() // the only way to get agent.ServeAgent is to close the connection it's serving on + // TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing connection. go func() { - time.Sleep(500 * time.Millisecond) + time.Sleep(2000 * time.Millisecond) c.Close() }() } diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 7149ac98631..d203e60651d 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -6,7 +6,6 @@ import ( "time" "github.com/containers/buildah/define" - "github.com/containers/buildah/pkg/blobcache" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/types" @@ -63,21 +62,27 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s libimageOptions.OciDecryptConfig = options.OciDecryptConfig libimageOptions.AllTags = options.AllTags libimageOptions.RetryDelay = &options.RetryDelay + libimageOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) if options.MaxRetries > 0 { retries := uint(options.MaxRetries) libimageOptions.MaxRetries = &retries } - if options.BlobDirectory != "" { - libimageOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) - } pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String()) if err != nil { return "", err } + // Note: It is important to do this before we pull any images/create containers. + // The default backend detection logic needs an empty store to correctly detect + // that we can use netavark, if the store was not empty it will use CNI to not break existing installs. 
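// Review note (annotation, not part of the diff): referring back to the reworked
// parse.Secrets above, the --secret flag now supports env-backed secrets as well as files:
//
//	Secrets([]string{"id=mysecret,src=/run/secret.txt"})
//	        // {"mysecret": {Source: "/run/secret.txt", SourceType: "file"}} (the file must exist)
//	Secrets([]string{"id=token,env=GITHUB_TOKEN"})
//	        // {"token": {Source: "GITHUB_TOKEN", SourceType: "env"}}
//	Secrets([]string{"id=HOME"})
//	        // src defaults to the id; type defaults to "env" because $HOME is set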
+ _, err = getNetworkInterface(options.Store, "", "") + if err != nil { + return "", err + } + runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) if err != nil { return "", err diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go index 78adb69396c..65bda9ca011 100644 --- a/vendor/github.com/containers/buildah/push.go +++ b/vendor/github.com/containers/buildah/push.go @@ -10,6 +10,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" @@ -20,11 +21,30 @@ import ( "github.com/sirupsen/logrus" ) +// cacheLookupReferenceFunc wraps a BlobCache into a +// libimage.LookupReferenceFunc to allow for using a BlobCache during +// image-copy operations. +func cacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { + // Using a closure here allows us to reference a BlobCache without + // having to explicitly maintain it in the libimage API. + return func(ref types.ImageReference) (types.ImageReference, error) { + if directory == "" { + return ref, nil + } + ref, err := blobcache.NewBlobCache(ref, directory, compress) + if err != nil { + return nil, errors.Wrapf(err, "error using blobcache %q", directory) + } + return ref, nil + } +} + // PushOptions can be used to alter how an image is copied somewhere. type PushOptions struct { // Compression specifies the type of compression which is applied to // layer blobs. The default is to not use compression, but // archive.Gzip is recommended. + // OBSOLETE: Use CompressionFormat instead. Compression archive.Compression // SignaturePolicyPath specifies an override location for the signature // policy which should be used for verifying the new image as it is @@ -71,6 +91,11 @@ type PushOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int } // Push copies the contents of the image to a new location. 
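The new cacheLookupReferenceFunc replaces the per-callsite "if options.BlobDirectory != ..." branching with a closure that decides at lookup time. A rough sketch of the shape of that pattern, with string stand-ins for types.ImageReference and the blobcache wrapper (the names below are illustrative, not the real containers/image API):

package main

import "fmt"

// lookupFunc mirrors the shape of libimage.LookupReferenceFunc.
type lookupFunc func(ref string) (string, error)

// withCache returns a lookup function that wraps refs in a cache when a
// directory is configured, and passes them through unchanged otherwise.
func withCache(directory string) lookupFunc {
	return func(ref string) (string, error) {
		if directory == "" {
			return ref, nil // no cache configured: identity
		}
		// stand-in for blobcache.NewBlobCache(ref, directory, compress)
		return fmt.Sprintf("blobcache(%s,%s)", directory, ref), nil
	}
}

func main() {
	plain := withCache("")
	cached := withCache("/var/tmp/blobs")
	r1, _ := plain("docker://alpine")
	r2, _ := cached("docker://alpine")
	fmt.Println(r1)
	fmt.Println(r2)
}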
@@ -84,19 +109,19 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
 	libimageOptions.RetryDelay = &options.RetryDelay
 	libimageOptions.OciEncryptConfig = options.OciEncryptConfig
 	libimageOptions.OciEncryptLayers = options.OciEncryptLayers
+	libimageOptions.CompressionFormat = options.CompressionFormat
+	libimageOptions.CompressionLevel = options.CompressionLevel
 	libimageOptions.PolicyAllowStorage = true
 
 	if options.Quiet {
 		libimageOptions.Writer = nil
 	}
 
-	if options.BlobDirectory != "" {
-		compress := types.PreserveOriginal
-		if options.Compression == archive.Gzip {
-			compress = types.Compress
-		}
-		libimageOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, compress)
+	compress := types.PreserveOriginal
+	if options.Compression == archive.Gzip {
+		compress = types.Compress
 	}
+	libimageOptions.SourceLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, compress)
 
 	runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext})
 	if err != nil {
diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go
index 935630cae34..e56aac8c9dc 100644
--- a/vendor/github.com/containers/buildah/run.go
+++ b/vendor/github.com/containers/buildah/run.go
@@ -5,7 +5,9 @@ import (
 	"io"
 
 	"github.com/containers/buildah/define"
+	"github.com/containers/buildah/internal"
 	"github.com/containers/buildah/pkg/sshagent"
+	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
 )
@@ -83,6 +85,8 @@ type RunOptions struct {
 	Runtime string
 	// Args adds global arguments for the runtime.
 	Args []string
+	// NoHosts use the images /etc/hosts file
+	NoHosts bool
 	// NoPivot adds the --no-pivot runtime flag.
 	NoPivot bool
 	// Mounts are additional mount points which we want to provide.
@@ -93,6 +97,8 @@ type RunOptions struct {
 	User string
 	// WorkingDir is an override for the working directory.
 	WorkingDir string
+	// ContextDir is used as the root directory for the source location for mounts that are of type "bind".
+	ContextDir string
 	// Shell is default shell to run in a container.
 	Shell string
 	// Cmd is an override for the configured default command.
@@ -139,20 +145,36 @@ type RunOptions struct {
 	// Devices are the additional devices to add to the containers
 	Devices define.ContainerDevices
 	// Secrets are the available secrets to use in a RUN
-	Secrets map[string]string
+	Secrets map[string]define.Secret
 	// SSHSources is the available ssh agents to use in a RUN
 	SSHSources map[string]*sshagent.Source `json:"-"`
 	// RunMounts are mounts for this run. RunMounts for this run
 	// will not show up in subsequent runs.
 	RunMounts []string
+	// Map of stages and container mountpoint if any from stage executor
+	StageMountPoints map[string]internal.StageMountDetails
+	// External Image mounts to be cleaned up.
+	// Buildah run --mount could mount an image before RUN calls, and RUN
+	// could clean them up as well
+	ExternalImageMounts []string
+	// System context of current build
+	SystemContext *types.SystemContext
+	// CgroupManager to use for running OCI containers
+	CgroupManager string
 }
 
 // RunMountArtifacts are the artifacts created when using a run mount.
 type runMountArtifacts struct {
 	// RunMountTargets are the run mount targets inside the container
 	RunMountTargets []string
+	// TmpFiles are artifacts that need to be removed outside the container
+	TmpFiles []string
+	// Any external images which were mounted inside container
+	MountedImages []string
 	// Agents are the ssh agents started
 	Agents []*sshagent.AgentServer
 	// SSHAuthSock is the path to the ssh auth sock inside the container
 	SSHAuthSock string
+	// LockedTargets to be unlocked if there are any.
+	LockedTargets []string
 }
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 113c83ef9a2..f52754c544e 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -1,10 +1,10 @@
+//go:build linux
 // +build linux
 
 package buildah
 
 import (
 	"bytes"
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -12,32 +12,43 @@ import (
 	"net"
 	"os"
 	"os/exec"
+	"os/signal"
 	"path/filepath"
 	"runtime"
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"syscall"
 	"time"
 
-	"github.com/containernetworking/cni/libcni"
 	"github.com/containers/buildah/bind"
 	"github.com/containers/buildah/chroot"
 	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/define"
+	"github.com/containers/buildah/internal"
+	internalParse "github.com/containers/buildah/internal/parse"
+	internalUtil "github.com/containers/buildah/internal/util"
 	"github.com/containers/buildah/pkg/overlay"
+	"github.com/containers/buildah/pkg/parse"
 	"github.com/containers/buildah/pkg/sshagent"
 	"github.com/containers/buildah/util"
+	"github.com/containers/common/libnetwork/etchosts"
+	"github.com/containers/common/libnetwork/network"
+	nettypes "github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/capabilities"
 	"github.com/containers/common/pkg/chown"
 	"github.com/containers/common/pkg/config"
-	"github.com/containers/common/pkg/defaultnet"
 	"github.com/containers/common/pkg/subscriptions"
+	imagetypes "github.com/containers/image/v5/types"
+	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/lockfile"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/unshare"
+	storagetypes "github.com/containers/storage/types"
 	"github.com/docker/go-units"
 	"github.com/docker/libnetwork/resolvconf"
 	"github.com/docker/libnetwork/types"
@@ -152,11 +163,16 @@ func (b *Builder) Run(command []string, options RunOptions) error {
 	setupTerminal(g, options.Terminal, options.TerminalSize)
 
-	configureNetwork, configureNetworks, err := b.configureNamespaces(g, options)
+	configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options)
 	if err != nil {
 		return err
 	}
 
+	// rootless and networks are not supported
+	if len(configureNetworks) > 0 && isolation == IsolationOCIRootless {
+		return errors.New("cannot use networks as rootless")
+	}
+
 	homeDir, err := b.configureUIDGID(g, mountPoint, options)
 	if err != nil {
 		return err
@@ -176,16 +192,19 @@ func (b *Builder) Run(command []string, options RunOptions) error {
 		return err
 	}
 
-	// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
-	rootUID, rootGID, err := util.GetHostRootIDs(spec)
-	if err != nil {
-		return err
+	uid, gid := spec.Process.User.UID, spec.Process.User.GID
+	if spec.Linux != nil {
+		uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid)
+		if err != nil {
+			return err
+		}
 	}
-	rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
+
+	idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
 
 	mode := os.FileMode(0755)
 	coptions := copier.MkdirOptions{
-		ChownNew: rootIDPair,
+		ChownNew: idPair,
 		ChmodNew: &mode,
 	}
 	if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
@@ -196,14 +215,31 @@ func (b *Builder) Run(command []string, options RunOptions) error {
 	namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...)
 	volumes := b.Volumes()
 
-	if !contains(volumes, "/etc/hosts") {
-		hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair)
+	// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
+	rootUID, rootGID, err := util.GetHostRootIDs(spec)
+	if err != nil {
+		return err
+	}
+	rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}
+
+	hostFile := ""
+	if !options.NoHosts && !contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled {
+		hostFile, err = b.generateHosts(path, rootIDPair, mountPoint)
 		if err != nil {
 			return err
 		}
-		// Only bind /etc/hosts if there's a network
-		if options.ConfigureNetwork != define.NetworkDisabled {
-			bindFiles["/etc/hosts"] = hostFile
+		bindFiles[config.DefaultHostsFile] = hostFile
+	}
+
+	// generate /etc/hostname if the user intentionally did not override
+	if !(contains(volumes, "/etc/hostname")) {
+		if _, ok := bindFiles["/etc/hostname"]; !ok {
+			hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair)
+			if err != nil {
+				return err
+			}
+			// Bind /etc/hostname
+			bindFiles["/etc/hostname"] = hostFile
 		}
 	}
 
@@ -229,7 +265,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
 		rootless = 1
 	}
 	// Populate the .containerenv with container information
-	containerenv := fmt.Sprintf(`\
+	containerenv := fmt.Sprintf(`
engine="buildah-%s"
name=%q
id=%q
@@ -247,7 +283,7 @@ rootless=%d
 		bindFiles["/run/.containerenv"] = containerenvPath
 	}
 
-	runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.SSHSources, options.RunMounts)
+	runArtifacts, err := b.setupMounts(options.SystemContext, mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.Secrets, options.SSHSources, options.RunMounts, options.ContextDir, options.StageMountPoints)
 	if err != nil {
 		return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
 	}
@@ -256,36 +292,29 @@ rootless=%d
 		spec.Process.Env = append(spec.Process.Env, sshenv)
 	}
 
+	// the following run was called from `buildah run`
+	// and some images were mounted for this run
+	// add them to cleanup artifacts
+	if len(options.ExternalImageMounts) > 0 {
+		runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
+	}
+
 	defer func() {
-		if err := cleanupRunMounts(mountPoint, runArtifacts); err != nil {
-			options.Logger.Errorf("unabe to cleanup run mounts %v", err)
+		if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
+			options.Logger.Errorf("unable to cleanup run mounts %v", err)
 		}
 	}()
 
 	defer b.cleanupTempVolumes()
 
-	if options.CNIConfigDir == "" {
-		options.CNIConfigDir = b.CNIConfigDir
-		if b.CNIConfigDir == "" {
-			options.CNIConfigDir = define.DefaultCNIConfigDir
-		}
-	}
-	if options.CNIPluginPath == "" {
-		options.CNIPluginPath = b.CNIPluginPath
-		if b.CNIPluginPath == "" {
-			options.CNIPluginPath = define.DefaultCNIPluginPath
-		}
-	}
-
 	switch isolation {
 	case define.IsolationOCI:
 		var moreCreateArgs []string
 		if options.NoPivot {
-			moreCreateArgs = []string{"--no-pivot"}
-		} else {
-			moreCreateArgs = nil
+			moreCreateArgs = append(moreCreateArgs, "--no-pivot")
 		}
-		err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path))
+		err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec,
+			mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
 	case IsolationChroot:
 		err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
 	case IsolationOCIRootless:
@@ -293,10 +322,8 @@ rootless=%d
 		if options.NoPivot {
 			moreCreateArgs = append(moreCreateArgs, "--no-pivot")
 		}
-		if err := setupRootlessSpecChanges(spec, path, b.CommonBuildOpts.ShmSize); err != nil {
-			return err
-		}
-		err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, define.Package+"-"+filepath.Base(path))
+		err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec,
+			mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostFile)
 	default:
 		err = errors.Errorf("don't know how to run this command")
 	}
@@ -413,7 +440,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
 	return mounts, nil
 }
 
-func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]string, sshSources map[string]*sshagent.Source, runFileMounts []string) (*runMountArtifacts, error) {
+func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, secrets map[string]define.Secret, sshSources map[string]*sshagent.Source, runFileMounts []string, contextDir string, stageMountPoints map[string]internal.StageMountDetails) (*runMountArtifacts, error) {
 	// Start building a new list of mounts.
 	var mounts []specs.Mount
 	haveMount := func(destination string) bool {
@@ -426,79 +453,9 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 		return false
 	}
 
-	ipc := namespaceOptions.Find(string(specs.IPCNamespace))
-	hostIPC := ipc == nil || ipc.Host
-	net := namespaceOptions.Find(string(specs.NetworkNamespace))
-	hostNetwork := net == nil || net.Host
-	user := namespaceOptions.Find(string(specs.UserNamespace))
-	hostUser := (user == nil || user.Host) && !unshare.IsRootless()
-
-	// Copy mounts from the generated list.
-	mountCgroups := true
-	specMounts := []specs.Mount{}
-	for _, specMount := range spec.Mounts {
-		// Override some of the mounts from the generated list if we're doing different things with namespaces.
-		if specMount.Destination == "/dev/shm" {
-			specMount.Options = []string{"nosuid", "noexec", "nodev", "mode=1777"}
-			if shmSize != "" {
-				specMount.Options = append(specMount.Options, "size="+shmSize)
-			}
-			if hostIPC && !hostUser {
-				if _, err := os.Stat("/dev/shm"); err != nil && os.IsNotExist(err) {
-					logrus.Debugf("/dev/shm is not present, not binding into container")
-					continue
-				}
-				specMount = specs.Mount{
-					Source:      "/dev/shm",
-					Type:        "bind",
-					Destination: "/dev/shm",
-					Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
-				}
-			}
-		}
-		if specMount.Destination == "/dev/mqueue" {
-			if hostIPC && !hostUser {
-				if _, err := os.Stat("/dev/mqueue"); err != nil && os.IsNotExist(err) {
-					logrus.Debugf("/dev/mqueue is not present, not binding into container")
-					continue
-				}
-				specMount = specs.Mount{
-					Source:      "/dev/mqueue",
-					Type:        "bind",
-					Destination: "/dev/mqueue",
-					Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
-				}
-			}
-		}
-		if specMount.Destination == "/sys" {
-			if hostNetwork && !hostUser {
-				mountCgroups = false
-				if _, err := os.Stat("/sys"); err != nil && os.IsNotExist(err) {
-					logrus.Debugf("/sys is not present, not binding into container")
-					continue
-				}
-				specMount = specs.Mount{
-					Source:      "/sys",
-					Type:        "bind",
-					Destination: "/sys",
-					Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev", "ro"},
-				}
-			}
-		}
-		specMounts = append(specMounts, specMount)
-	}
-
-	// Add a mount for the cgroups filesystem, unless we're already
-	// recursively bind mounting all of /sys, in which case we shouldn't
-	// bother with it.
-	sysfsMount := []specs.Mount{}
-	if mountCgroups {
-		sysfsMount = []specs.Mount{{
-			Destination: "/sys/fs/cgroup",
-			Type:        "cgroup",
-			Source:      "cgroup",
-			Options:     []string{bind.NoBindOption, "nosuid", "noexec", "nodev", "relatime", "ro"},
-		}}
+	specMounts, err := setupSpecialMountSpecChanges(spec, b.CommonBuildOpts.ShmSize)
+	if err != nil {
+		return nil, err
 	}
 
 	// Get the list of files we need to bind into the container.
@@ -517,12 +474,18 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 		return nil, err
 	}
 
+	// Get host UID and GID of the container process.
+	processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID)
+	if err != nil {
+		return nil, err
+	}
+
 	// Get the list of subscriptions mounts.
 	subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false)
 
 	// Get the list of mounts that are just for this Run() call.
 	// TODO: acui: de-spaghettify run mounts
-	runMounts, mountArtifacts, err := runSetupRunMounts(runFileMounts, secrets, sshSources, b.MountLabel, cdir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, b.ProcessLabel)
+	runMounts, mountArtifacts, err := b.runSetupRunMounts(context, runFileMounts, secrets, stageMountPoints, sshSources, cdir, contextDir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, int(rootUID), int(rootGID), int(processUID), int(processGID))
 	if err != nil {
 		return nil, err
 	}
@@ -532,11 +495,6 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 	if err != nil {
 		return nil, err
 	}
-	// Get host UID and GID of the container process.
-	processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID)
-	if err != nil {
-		return nil, err
-	}
 
 	// Get the list of explicitly-specified volume mounts.
 	volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID))
@@ -544,7 +502,12 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 		return nil, err
 	}
 
-	allMounts := util.SortMounts(append(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...), sysfsMount...))
+	// prepare the list of mount destinations which can be cleaned up safely.
+	// we can clean bindFiles, subscriptionMounts and specMounts;
+	// everything other than these might have user content
+	mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...)
+
+	allMounts := util.SortMounts(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...))
 	// Add them all, in the preferred order, except where they conflict with something that was previously added.
 	for _, mount := range allMounts {
 		if haveMount(mount.Destination) {
@@ -560,6 +523,23 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
 	return mountArtifacts, nil
 }
 
+// Destinations which can be cleaned up after every RUN
+func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
+	mountDest := []string{}
+	for _, mount := range mounts {
+		// Add all destinations to mountArtifacts so that they can be cleaned up later
+		if mount.Destination != "" {
+			// we don't want to remove destinations under /etc, /dev, /sys, /proc, as the rootfs already contains these files
+			// and unionfs will create whiteout (`.wh.`) files on removal of overlapping files from these directories.
+			// everything other than these will be cleaned up
+			if !strings.HasPrefix(mount.Destination, "/etc") && !strings.HasPrefix(mount.Destination, "/dev") && !strings.HasPrefix(mount.Destination, "/sys") && !strings.HasPrefix(mount.Destination, "/proc") {
+				mountDest = append(mountDest, mount.Destination)
+			}
+		}
+	}
+	return mountDest
+}
+
 // addResolvConf copies files from host and sets them up to bind mount into container
 func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions define.NamespaceOptions) (string, error) {
 	resolvConf := "/etc/resolv.conf"
@@ -664,44 +644,58 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe
 }
 
 // generateHosts creates a containers hosts file
-func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownOpts *idtools.IDPair) (string, error) {
-	hostPath := "/etc/hosts"
-	stat, err := os.Stat(hostPath)
+func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoot string) (string, error) {
+	conf, err := config.Default()
 	if err != nil {
 		return "", err
 	}
 
-	hosts := bytes.NewBufferString("# Generated by Buildah\n")
-	orig, err := ioutil.ReadFile(hostPath)
+	path, err := etchosts.GetBaseHostFile(conf.Containers.BaseHostsFile, imageRoot)
 	if err != nil {
 		return "", err
 	}
-	hosts.Write(orig)
-	for _, host := range addHosts {
-		// verify the host format
-		values := strings.SplitN(host, ":", 2)
-		if len(values) != 2 {
-			return "", errors.Errorf("unable to parse host entry %q: incorrect format", host)
-		}
-		if values[0] == "" {
-			return "", errors.Errorf("hostname in host entry %q is empty", host)
-		}
-		if values[1] == "" {
-			return "", errors.Errorf("IP address in host entry %q is empty", host)
-		}
-		hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0])))
+
+	targetfile := filepath.Join(rdir, "hosts")
+	if err := etchosts.New(&etchosts.Params{
+		BaseFile:                 path,
+		ExtraHosts:               b.CommonBuildOpts.AddHost,
+		HostContainersInternalIP: etchosts.GetHostContainersInternalIP(conf, nil, nil),
+		TargetFile:               targetfile,
+	}); err != nil {
+		return "", err
 	}
 
-	if hostname != "" {
-		hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s\n", hostname)))
-		hosts.Write([]byte(fmt.Sprintf("::1 %s\n", hostname)))
+	uid := 0
+	gid := 0
+	if chownOpts != nil {
+		uid = chownOpts.UID
+		gid = chownOpts.GID
 	}
-	cfile := filepath.Join(rdir, filepath.Base(hostPath))
-	if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil {
-		return "", errors.Wrapf(err, "error writing /etc/hosts into the container")
+	if err = os.Chown(targetfile, uid, gid); err != nil {
+		return "", err
 	}
-	uid := int(stat.Sys().(*syscall.Stat_t).Uid)
-	gid := int(stat.Sys().(*syscall.Stat_t).Gid)
+	if err := label.Relabel(targetfile, b.MountLabel, false); err != nil {
+		return "", err
+	}
+
+	return targetfile, nil
+}
+
+// generateHostname creates a containers /etc/hostname file
+func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDPair) (string, error) {
+	var err error
+	hostnamePath := "/etc/hostname"
+
+	var hostnameBuffer bytes.Buffer
+	hostnameBuffer.Write([]byte(fmt.Sprintf("%s\n", hostname)))
+
+	cfile := filepath.Join(rdir, filepath.Base(hostnamePath))
+	if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil {
+		return "", errors.Wrapf(err, "error writing /etc/hostname into the container")
+	}
+
+	uid := 0
+	gid := 0
 	if chownOpts != nil {
 		uid = chownOpts.UID
 		gid = chownOpts.GID
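generateHosts now delegates composition to containers/common's etchosts package instead of copying and appending to the host's /etc/hosts by hand. A rough stdlib-only sketch of what such a writer does with a base file plus "hostname:ip" entries (the function below is illustrative, not the etchosts implementation):

package main

import (
	"fmt"
	"os"
	"strings"
)

// writeHosts composes a container hosts file from a base file plus
// user-supplied "hostname:ip" entries.
func writeHosts(baseFile, targetFile string, extraHosts []string) error {
	var b strings.Builder
	if base, err := os.ReadFile(baseFile); err == nil {
		b.Write(base) // start from the configured base hosts file
	}
	for _, h := range extraHosts {
		parts := strings.SplitN(h, ":", 2)
		if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
			return fmt.Errorf("unable to parse host entry %q", h)
		}
		// hosts(5) format: IP address first, then the name
		fmt.Fprintf(&b, "%s\t%s\n", parts[1], parts[0])
	}
	return os.WriteFile(targetFile, []byte(b.String()), 0o644)
}

func main() {
	_ = writeHosts("/etc/hosts", "/tmp/hosts", []string{"db.local:10.88.0.2"})
}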
@@ -736,7 +730,8 @@ func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, termina
 	}
 }
 
-func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
+func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string,
+	containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) {
 	if options.Logger == nil {
 		options.Logger = logrus.StandardLogger()
 	}
@@ -772,11 +767,10 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 	runtime := options.Runtime
 	if runtime == "" {
 		runtime = util.Runtime()
-
-		localRuntime := util.FindLocalRuntime(runtime)
-		if localRuntime != "" {
-			runtime = localRuntime
-		}
+	}
+	localRuntime := util.FindLocalRuntime(runtime)
+	if localRuntime != "" {
+		runtime = localRuntime
 	}
 
 	// Default to just passing down our stdio.
@@ -795,7 +789,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 	if err = unix.Pipe(finishCopy); err != nil {
 		return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio")
 	}
-	finishedCopy := make(chan struct{})
+	finishedCopy := make(chan struct{}, 1)
 	var pargs []string
 	if spec.Process != nil {
 		pargs = spec.Process.Args
@@ -820,6 +814,9 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 		if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil {
 			return 1, err
 		}
+		if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil {
+			return 1, err
+		}
 		errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]}
 		closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]}
 		// Set stdio to our pipes.
@@ -839,26 +836,36 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 		}
 	}
 
+	runtimeArgs := options.Args[:]
+	if options.CgroupManager == config.SystemdCgroupsManager {
+		runtimeArgs = append(runtimeArgs, "--systemd-cgroup")
+	}
+
 	// Build the commands that we'll execute.
 	pidFile := filepath.Join(bundlePath, "pid")
-	args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName)
+	args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName)
 	create := exec.Command(runtime, args...)
+	setPdeathsig(create)
 	create.Dir = bundlePath
 	stdin, stdout, stderr := getCreateStdio()
 	create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr
-	if create.SysProcAttr == nil {
-		create.SysProcAttr = &syscall.SysProcAttr{}
-	}
 
 	args = append(options.Args, "start", containerName)
 	start := exec.Command(runtime, args...)
+	setPdeathsig(start)
 	start.Dir = bundlePath
 	start.Stderr = os.Stderr
 
-	args = append(options.Args, "kill", containerName)
-	kill := exec.Command(runtime, args...)
-	kill.Dir = bundlePath
-	kill.Stderr = os.Stderr
+	kill := func(signal string) *exec.Cmd {
+		args := append(options.Args, "kill", containerName)
+		if signal != "" {
+			args = append(args, signal)
+		}
+		kill := exec.Command(runtime, args...)
+		kill.Dir = bundlePath
+		kill.Stderr = os.Stderr
+		return kill
+	}
 
 	args = append(options.Args, "delete", containerName)
 	del := exec.Command(runtime, args...)
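Replacing the single prebuilt kill command with a constructor matters because an exec.Cmd cannot be started twice, and the new signal handling needs to issue "kill <name> SIGKILL" as well as the default stop. The pattern in isolation (runtime name and argument order assumed from the hunk above):

package main

import (
	"os"
	"os/exec"
)

// killCommand returns a fresh *exec.Cmd per invocation, because a started
// exec.Cmd cannot be re-run.
func killCommand(runtime, bundlePath, containerName string) func(signal string) *exec.Cmd {
	return func(signal string) *exec.Cmd {
		args := []string{"kill", containerName}
		if signal != "" {
			args = append(args, signal) // e.g. "SIGKILL"
		}
		cmd := exec.Command(runtime, args...)
		cmd.Dir = bundlePath
		cmd.Stderr = os.Stderr
		return cmd
	}
}

func main() {
	kill := killCommand("runc", "/run/bundle", "buildah-123")
	_ = kill("").Run()        // default stop signal
	_ = kill("SIGKILL").Run() // forced kill
}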
@@ -891,7 +898,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 	if err != nil {
 		return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue))
 	}
-	stopped := false
+	var stopped uint32
 	var reaping sync.WaitGroup
 	reaping.Add(1)
 	go func() {
@@ -902,17 +909,20 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 			wstatus = 0
 			options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err)
 		}
-		stopped = true
+		atomic.StoreUint32(&stopped, 1)
 	}()
 
 	if configureNetwork {
-		teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, pargs)
-		if teardown != nil {
-			defer teardown()
-		}
-		if err != nil {
+		if _, err := containerCreateW.Write([]byte{1}); err != nil {
 			return 1, err
 		}
+		containerCreateW.Close()
+		logrus.Debug("waiting for parent start message")
+		b := make([]byte, 1)
+		if _, err := containerStartR.Read(b); err != nil {
+			return 1, errors.Wrap(err, "did not get container start message from parent")
+		}
+		containerStartR.Close()
 	}
 
 	if copyPipes {
@@ -935,14 +945,24 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 		return 1, errors.Wrapf(err, "error from %s starting container", runtime)
 	}
 	defer func() {
-		if !stopped {
-			if err2 := kill.Run(); err2 != nil {
-				options.Logger.Infof("error from %s stopping container: %v", runtime, err2)
+		if atomic.LoadUint32(&stopped) == 0 {
+			if err := kill("").Run(); err != nil {
+				options.Logger.Infof("error from %s stopping container: %v", runtime, err)
 			}
+			atomic.StoreUint32(&stopped, 1)
 		}
 	}()
 
 	// Wait for the container to exit.
+	interrupted := make(chan os.Signal, 100)
+	go func() {
+		for range interrupted {
+			if err := kill("SIGKILL").Run(); err != nil {
+				logrus.Errorf("%v sending SIGKILL", err)
+			}
+		}
+	}()
+	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
 	for {
 		now := time.Now()
 		var state specs.State
@@ -952,7 +972,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 		stat.Stderr = os.Stderr
 		stateOutput, err := stat.Output()
 		if err != nil {
-			if stopped {
+			if atomic.LoadUint32(&stopped) != 0 {
 				// container exited
 				break
 			}
@@ -964,23 +984,25 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe
 		switch state.Status {
 		case "running":
 		case "stopped":
-			stopped = true
+			atomic.StoreUint32(&stopped, 1)
 		default:
 			return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status)
 		}
-		if stopped {
+		if atomic.LoadUint32(&stopped) != 0 {
 			break
 		}
 		select {
 		case <-finishedCopy:
-			stopped = true
+			atomic.StoreUint32(&stopped, 1)
 		case <-time.After(time.Until(now.Add(100 * time.Millisecond))):
 			continue
 		}
-		if stopped {
+		if atomic.LoadUint32(&stopped) != 0 {
 			break
 		}
 	}
+	signal.Stop(interrupted)
+	close(interrupted)
 
 	// Close the writing end of the stop-handling-stdio notification pipe.
 	unix.Close(finishCopy[1])
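The reaper goroutine writes `stopped` while the polling loop reads it, which was a data race with the old bool; the hunk moves it to a uint32 driven through sync/atomic. The same pattern reduced to a runnable sketch:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var stopped uint32 // 0 = running, 1 = stopped

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// reaper goroutine: mark the container as gone
		atomic.StoreUint32(&stopped, 1)
	}()

	wg.Wait()
	// polling side: safe to read from another goroutine
	if atomic.LoadUint32(&stopped) != 0 {
		fmt.Println("container exited")
	}
}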
@@ -1066,7 +1088,8 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
 		unix.CloseOnExec(fd)
 	}
 
-	cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0")
+	cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0")
+	setPdeathsig(cmd)
 	cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil
 	cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW}
 
@@ -1107,76 +1130,16 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
 	}, nil
 }
 
-func runConfigureNetwork(isolation define.Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) {
-	var netconf, undo []*libcni.NetworkConfigList
-
+func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), netStatus map[string]nettypes.StatusBlock, err error) {
 	if isolation == IsolationOCIRootless {
-		if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" {
-			return setupRootlessNetwork(pid)
-		}
+		teardown, err = setupRootlessNetwork(pid)
+		return teardown, nil, err
 	}
 
-	confdir := options.CNIConfigDir
-	// Create a default configuration if one is not present.
-	// Need to pull containers.conf settings for this one.
-	containersConf, err := config.Default()
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get container config")
-	}
-	if err := defaultnet.Create(containersConf.Network.DefaultNetwork, containersConf.Network.DefaultSubnet, confdir, confdir, containersConf.Engine.MachineEnabled); err != nil {
-		logrus.Errorf("Failed to created default CNI network: %v", err)
+	if len(configureNetworks) == 0 {
+		configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
 	}
 
-	// Scan for CNI configuration files.
-	files, err := libcni.ConfFiles(confdir, []string{".conf"})
-	if err != nil {
-		return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir)
-	}
-	lists, err := libcni.ConfFiles(confdir, []string{".conflist"})
-	if err != nil {
-		return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir)
-	}
-	logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...))
-	// Read the CNI configuration files.
-	for _, file := range files {
-		nc, err := libcni.ConfFromFile(file)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command)
-		}
-		if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) {
-			if nc.Network.Name == "" {
-				logrus.Debugf("configuration in %q has no name, skipping it", file)
-			} else {
-				logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name)
-			}
-			continue
-		}
-		if nc.Network == nil {
-			continue
-		}
-		cl, err := libcni.ConfListFromConf(nc)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command)
-		}
-		logrus.Debugf("using network configuration from %q", file)
-		netconf = append(netconf, cl)
-	}
-	for _, list := range lists {
-		cl, err := libcni.ConfListFromFile(list)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command)
-		}
-		if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) {
-			if cl.Name == "" {
-				logrus.Debugf("configuration list in %q has no name, skipping it", list)
-			} else {
-				logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name)
-			}
-			continue
-		}
-		logrus.Debugf("using network configuration list from %q", list)
-		netconf = append(netconf, cl)
-	}
-
 	// Make sure we can access the container's network namespace,
 	// even after it exits, to successfully tear down the
 	// interfaces.  Ensure this by opening a handle to the network
@@ -1185,40 +1148,35 @@ func runConfigureNetwork(isolation define.Isolation, options RunOptions, configu
 	netns := fmt.Sprintf("/proc/%d/ns/net", pid)
 	netFD, err := unix.Open(netns, unix.O_RDONLY, 0)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error opening network namespace for %v", command)
+		return nil, nil, errors.Wrapf(err, "error opening network namespace")
 	}
 	mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD)
 
-	// Build our search path for the plugins.
-	pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator))
-	cni := libcni.CNIConfig{Path: pluginPaths}
-	// Configure the interfaces.
-	rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf)
-	teardown = func() {
-		for _, nc := range undo {
-			if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil {
-				options.Logger.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err)
-			}
+	networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks))
+	for i, network := range configureNetworks {
+		networks[network] = nettypes.PerNetworkOptions{
+			InterfaceName: fmt.Sprintf("eth%d", i),
 		}
-		unix.Close(netFD)
 	}
-	for i, nc := range netconf {
-		// Build the runtime config for use with this network configuration.
-		rtconf[nc] = &libcni.RuntimeConf{
-			ContainerID:    containerName,
-			NetNS:          mynetns,
-			IfName:         fmt.Sprintf("if%d", i),
-			Args:           [][2]string{},
-			CapabilityArgs: map[string]interface{}{},
-		}
-		// Bring it up.
-		_, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc])
+
+	opts := nettypes.NetworkOptions{
+		ContainerID:   containerName,
+		ContainerName: containerName,
+		Networks:      networks,
+	}
+	netStatus, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	teardown = func() {
+		err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts})
 		if err != nil {
-			return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command)
+			options.Logger.Errorf("failed to cleanup network: %v", err)
 		}
-		// Add it to the list of networks to take down when the container process exits.
-		undo = append([]*libcni.NetworkConfigList{nc}, undo...)
 	}
-	return teardown, nil
+
+	return teardown, netStatus, nil
 }
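runConfigureNetwork no longer parses CNI .conf/.conflist files itself; it builds a name-to-options map and hands it to the pluggable NetworkInterface backend (CNI or netavark). A sketch of the interface-name assignment with a local stand-in for nettypes.PerNetworkOptions:

package main

import "fmt"

// perNetworkOptions is a local stand-in for nettypes.PerNetworkOptions.
type perNetworkOptions struct {
	InterfaceName string
}

// networkMap assigns eth0..ethN to the requested networks, mirroring the
// loop in runConfigureNetwork above.
func networkMap(names []string) map[string]perNetworkOptions {
	networks := make(map[string]perNetworkOptions, len(names))
	for i, name := range names {
		networks[name] = perNetworkOptions{InterfaceName: fmt.Sprintf("eth%d", i)}
	}
	return networks
}

func main() {
	fmt.Println(networkMap([]string{"podman", "backend"}))
	// map[backend:{eth1} podman:{eth0}]
}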
 
 func setNonblock(logger *logrus.Logger, fd int, description string, nonblocking bool) (bool, error) { //nolint:interfacer
@@ -1248,6 +1206,7 @@ func runCopyStdio(logger *logrus.Logger, stdio *sync.WaitGroup, copyPipes bool,
 		}
 		stdio.Done()
 		finishedCopy <- struct{}{}
+		close(finishedCopy)
 	}()
 	// Map describing where data on an incoming descriptor should go.
 	relayMap := make(map[int]int)
@@ -1575,8 +1534,24 @@ func runUsingRuntimeMain() {
 		os.Exit(1)
 	}
 
+	// open the pipes used to communicate with the parent process
+	var containerCreateW *os.File
+	var containerStartR *os.File
+	if options.ConfigureNetwork {
+		containerCreateW = os.NewFile(4, "containercreatepipe")
+		if containerCreateW == nil {
+			fmt.Fprintf(os.Stderr, "could not open fd 4\n")
+			os.Exit(1)
+		}
+		containerStartR = os.NewFile(5, "containerstartpipe")
+		if containerStartR == nil {
+			fmt.Fprintf(os.Stderr, "could not open fd 5\n")
+			os.Exit(1)
+		}
+	}
+
 	// Run the container, start to finish.
-	status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName)
+	status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
 		os.Exit(1)
@@ -1690,7 +1665,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
 	return configureNetwork, configureNetworks, configureUTS, nil
 }
 
-func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) {
+func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) {
 	defaultNamespaceOptions, err := DefaultNamespaceOptions()
 	if err != nil {
 		return false, nil, err
@@ -1701,8 +1676,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions)
 	namespaceOptions.AddOrReplace(options.NamespaceOptions...)
 
 	networkPolicy := options.ConfigureNetwork
+	// Nothing was specified explicitly, so the network policy should be inherited from the builder.
 	if networkPolicy == NetworkDefault {
 		networkPolicy = b.ConfigureNetwork
+
+		// If the builder policy was NetworkDisabled and we want to disable the
+		// network for this run, reset options.ConfigureNetwork to NetworkDisabled,
+		// since it will be treated as the source of truth later.
+		if networkPolicy == NetworkDisabled {
+			options.ConfigureNetwork = networkPolicy
+		}
 	}
 
 	configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
@@ -1793,7 +1777,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
 	parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
 		var foundrw, foundro, foundz, foundZ, foundO, foundU bool
-		var rootProp string
+		var rootProp, upperDir, workDir string
 		for _, opt := range options {
 			switch opt {
 			case "rw":
@@ -1811,6 +1795,19 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
 			case "private", "rprivate", "slave", "rslave", "shared", "rshared":
 				rootProp = opt
 			}
+
+			if strings.HasPrefix(opt, "upperdir") {
+				splitOpt := strings.SplitN(opt, "=", 2)
+				if len(splitOpt) > 1 {
+					upperDir = splitOpt[1]
+				}
+			}
+			if strings.HasPrefix(opt, "workdir") {
+				splitOpt := strings.SplitN(opt, "=", 2)
+				if len(splitOpt) > 1 {
+					workDir = splitOpt[1]
+				}
+			}
 		}
 		if !foundrw && !foundro {
 			options = append(options, "rw")
@@ -1831,6 +1828,10 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
 			}
 		}
 		if foundO {
+			if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") {
+				return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa")
+			}
+
 			containerDir, err := b.store.ContainerDirectory(b.ContainerID)
 			if err != nil {
 				return specs.Mount{}, err
@@ -1841,7 +1842,14 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
 				return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir)
 			}
 
-			overlayMount, err := overlay.Mount(contentDir, host, container, rootUID, rootGID, b.store.GraphOptions())
+			overlayOpts := overlay.Options{RootUID: rootUID,
+				RootGID:                rootGID,
+				UpperDirOptionFragment: upperDir,
+				WorkDirOptionFragment:  workDir,
+				GraphOpts:              b.store.GraphOptions(),
+			}
+
+			overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
 			if err == nil {
 				b.TempVolumes[contentDir] = true
 			}
@@ -1882,7 +1890,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
 	// Bind mount volumes given by the user when the container was created
 	for _, i := range volumeMounts {
 		var options []string
-		spliti := strings.Split(i, ":")
+		spliti := parse.SplitStringWithColonEscape(i)
 		if len(spliti) > 2 {
 			options = strings.Split(spliti[2], ",")
 		}
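parseMount now accepts overlay-specific upperdir=/workdir= options and, per the foundO check above, requires them as a pair. The option scan reduces to prefix matching plus one split; a runnable sketch of that validation:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// overlayDirs extracts upperdir= and workdir= from a volume option list and
// enforces that they are either both present or both absent.
func overlayDirs(options []string) (upperDir, workDir string, err error) {
	for _, opt := range options {
		if strings.HasPrefix(opt, "upperdir=") {
			upperDir = strings.SplitN(opt, "=", 2)[1]
		}
		if strings.HasPrefix(opt, "workdir=") {
			workDir = strings.SplitN(opt, "=", 2)[1]
		}
	}
	if (upperDir == "") != (workDir == "") {
		return "", "", errors.New("upperdir and workdir must be specified together")
	}
	return upperDir, workDir, nil
}

func main() {
	u, w, err := overlayDirs([]string{"O", "upperdir=/tmp/upper", "workdir=/tmp/work"})
	fmt.Println(u, w, err) // /tmp/upper /tmp/work <nil>
}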
@@ -1935,9 +1943,6 @@ func setupCapAdd(g *generate.Generator, caps ...string) error {
 		if err := g.AddProcessCapabilityEffective(cap); err != nil {
 			return errors.Wrapf(err, "error adding %q to the effective capability set", cap)
 		}
-		if err := g.AddProcessCapabilityInheritable(cap); err != nil {
-			return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap)
-		}
 		if err := g.AddProcessCapabilityPermitted(cap); err != nil {
 			return errors.Wrapf(err, "error adding %q to the permitted capability set", cap)
 		}
@@ -1956,9 +1961,6 @@ func setupCapDrop(g *generate.Generator, caps ...string) error {
 		if err := g.DropProcessCapabilityEffective(cap); err != nil {
 			return errors.Wrapf(err, "error removing %q from the effective capability set", cap)
 		}
-		if err := g.DropProcessCapabilityInheritable(cap); err != nil {
-			return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap)
-		}
 		if err := g.DropProcessCapabilityPermitted(cap); err != nil {
 			return errors.Wrapf(err, "error removing %q from the permitted capability set", cap)
 		}
@@ -2079,18 +2081,8 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions
 	g.ClearProcessEnv()
 
 	if b.CommonBuildOpts.HTTPProxy {
-		for _, envSpec := range []string{
-			"http_proxy",
-			"HTTP_PROXY",
-			"https_proxy",
-			"HTTPS_PROXY",
-			"ftp_proxy",
-			"FTP_PROXY",
-			"no_proxy",
-			"NO_PROXY",
-		} {
-			envVal := os.Getenv(envSpec)
-			if envVal != "" {
+		for _, envSpec := range config.ProxyEnv {
+			if envVal, ok := os.LookupEnv(envSpec); ok {
 				g.AddProcessEnv(envSpec, envVal)
 			}
 		}
@@ -2104,88 +2096,162 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions
 	}
 }
 
-func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string) error {
-	spec.Process.User.AdditionalGids = nil
-	spec.Linux.Resources = nil
+func addOrReplaceMount(moutns []specs.Mount, mount specs.Mount) []spec.Mount {
+	for i := range moutns {
+		if moutns[i].Destination == mount.Destination {
+			moutns[i] = mount
+			return moutns
+		}
+	}
+	return append(moutns, mount)
+}
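addOrReplaceMount keeps the generated mount list free of duplicate destinations, with later entries winning. Its behavior with a trimmed-down mount type:

package main

import "fmt"

type mount struct{ Destination, Source string }

// addOrReplace overwrites an existing entry with the same destination,
// otherwise appends, matching addOrReplaceMount above.
func addOrReplace(mounts []mount, m mount) []mount {
	for i := range mounts {
		if mounts[i].Destination == m.Destination {
			mounts[i] = m
			return mounts
		}
	}
	return append(mounts, m)
}

func main() {
	ms := []mount{{"/sys", "sysfs"}}
	ms = addOrReplace(ms, mount{"/sys", "/sys"})    // replaces
	ms = addOrReplace(ms, mount{"/dev/shm", "shm"}) // appends
	fmt.Println(ms) // [{/sys /sys} {/dev/shm shm}]
}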
-	emptyDir := filepath.Join(bundleDir, "empty")
-	if err := os.Mkdir(emptyDir, 0); err != nil {
-		return err
-	}
-
+// setupSpecialMountSpecChanges creates special mounts depending on the namespaces;
+// logic taken from podman and adapted for buildah
+// https://github.com/containers/podman/blob/4ba71f955a944790edda6e007e6d074009d437a7/pkg/specgen/generate/oci.go#L178
+func setupSpecialMountSpecChanges(spec *spec.Spec, shmSize string) ([]specs.Mount, error) {
+	mounts := spec.Mounts
+	isRootless := unshare.IsRootless()
+	isNewUserns := false
+	isNetns := false
+	isPidns := false
+	isIpcns := false
+
+	for _, namespace := range spec.Linux.Namespaces {
+		switch namespace.Type {
+		case specs.NetworkNamespace:
+			isNetns = true
+		case specs.UserNamespace:
+			isNewUserns = true
+		case specs.PIDNamespace:
+			isPidns = true
+		case specs.IPCNamespace:
+			isIpcns = true
+		}
+	}
+
+	addCgroup := true
+	// mount sys when root and no userns or when both netns and userns are private
+	canMountSys := (!isRootless && !isNewUserns) || (isNetns && isNewUserns)
+	if !canMountSys {
+		addCgroup = false
+		sys := "/sys"
+		sysMnt := specs.Mount{
+			Destination: sys,
+			Type:        "bind",
+			Source:      sys,
+			Options:     []string{bind.NoBindOption, "rprivate", "nosuid", "noexec", "nodev", "ro", "rbind"},
+		}
+		mounts = addOrReplaceMount(mounts, sysMnt)
+	}
+
+	gid5Available := true
+	if isRootless {
+		_, gids, err := unshare.GetHostIDMappings("")
+		if err != nil {
+			return nil, err
+		}
+		gid5Available = checkIdsGreaterThan5(gids)
+	}
+	if gid5Available && len(spec.Linux.GIDMappings) > 0 {
+		gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings)
+	}
+	if !gid5Available {
+		// If we have no GID mappings, the gid=5 default option would fail, so drop it.
+		devPts := specs.Mount{
 			Destination: "/dev/pts",
 			Type:        "devpts",
-			Options:     []string{"private", "noexec", "nosuid", "newinstance", "ptmxmode=0666", "mode=0620"},
-		},
-		{
+			Source:      "devpts",
+			Options:     []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
+		}
+		mounts = addOrReplaceMount(mounts, devPts)
+	}
+
+	isUserns := isNewUserns || isRootless
+
+	if isUserns && !isIpcns {
+		devMqueue := "/dev/mqueue"
+		devMqueueMnt := specs.Mount{
+			Destination: devMqueue,
+			Type:        "bind",
+			Source:      devMqueue,
+			Options:     []string{bind.NoBindOption, "bind", "nosuid", "noexec", "nodev"},
+		}
+		mounts = addOrReplaceMount(mounts, devMqueueMnt)
+	}
+	if isUserns && !isPidns {
+		proc := "/proc"
+		procMount := specs.Mount{
+			Destination: proc,
+			Type:        "bind",
+			Source:      proc,
+			Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
+		}
+		mounts = addOrReplaceMount(mounts, procMount)
+	}
+
+	if addCgroup {
+		cgroupMnt := specs.Mount{
+			Destination: "/sys/fs/cgroup",
+			Type:        "cgroup",
+			Source:      "cgroup",
+			Options:     []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", "rw"},
+		}
+		mounts = addOrReplaceMount(mounts, cgroupMnt)
+	}
+
+	// if userns and host ipc bind mount shm
+	if isUserns && !isIpcns {
+		// bind mount /dev/shm when it exists
+		if _, err := os.Stat("/dev/shm"); err == nil {
+			shmMount := specs.Mount{
+				Source:      "/dev/shm",
+				Type:        "bind",
+				Destination: "/dev/shm",
+				Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
+			}
+			mounts = addOrReplaceMount(mounts, shmMount)
+		}
+	} else if shmSize != "" {
+		shmMount := specs.Mount{
 			Source:      "shm",
 			Destination: "/dev/shm",
 			Type:        "tmpfs",
-			Options:     []string{"private", "nodev", "noexec", "nosuid", "mode=1777", fmt.Sprintf("size=%s", shmSize)},
-		},
-		{
-			Source:      "/dev",
-			Destination: "/dev",
-			Type:        "tmpfs",
-			Options:     []string{"private", "strictatime", "noexec", "nosuid", "mode=755", "size=65536k"},
-		},
-		{
-			Source:      "mqueue",
-			Destination: "/dev/mqueue",
-			Type:        "mqueue",
-			Options:     []string{"private", "nodev", "noexec", "nosuid"},
-		},
-		{
-			Source:      "/proc",
-			Destination: "/proc",
-			Type:        "proc",
-			Options:     []string{"private", "nodev", "noexec", "nosuid"},
-		},
-		{
-			Source:      "/sys",
-			Destination: "/sys",
-			Type:        "bind",
-			Options:     []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"},
-		},
-	}
-	// Cover up /sys/fs/cgroup, if it exist in our source for /sys.
-	if _, err := os.Stat("/sys/fs/cgroup"); err == nil {
-		spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup")
-	}
-	// Keep anything that isn't under /dev, /proc, or /sys.
-	for i := range spec.Mounts {
-		if spec.Mounts[i].Destination == "/dev" || strings.HasPrefix(spec.Mounts[i].Destination, "/dev/") ||
-			spec.Mounts[i].Destination == "/proc" || strings.HasPrefix(spec.Mounts[i].Destination, "/proc/") ||
-			spec.Mounts[i].Destination == "/sys" || strings.HasPrefix(spec.Mounts[i].Destination, "/sys/") {
-			continue
+			Options:     []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=" + shmSize},
 		}
-		mounts = append(mounts, spec.Mounts[i])
+		mounts = addOrReplaceMount(mounts, shmMount)
 	}
-	spec.Mounts = mounts
-	return nil
+
+	return mounts, nil
+}
+
+func checkIdsGreaterThan5(ids []spec.LinuxIDMapping) bool {
+	for _, r := range ids {
+		if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
+			return true
+		}
+	}
+	return false
 }
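checkIdsGreaterThan5 decides whether the devpts gid=5 default is usable: some UID/GID mapping must actually cover container ID 5. The range test in isolation (the mapping struct mirrors spec.LinuxIDMapping):

package main

import "fmt"

type idMapping struct{ ContainerID, HostID, Size uint32 }

// gid5Mapped reports whether container GID 5 falls inside any mapping,
// the condition checkIdsGreaterThan5 evaluates.
func gid5Mapped(ids []idMapping) bool {
	for _, r := range ids {
		if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(gid5Mapped([]idMapping{{0, 100000, 65536}})) // true: 0..65535 covers 5
	fmt.Println(gid5Mapped([]idMapping{{0, 1000, 1}}))       // false: only ID 0 is mapped
}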
-func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
+func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks,
+	moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName, buildContainerName, hostsFile string) (err error) {
 	var confwg sync.WaitGroup
 	config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
-		Options:           options,
-		Spec:              spec,
-		RootPath:          rootPath,
-		BundlePath:        bundlePath,
-		ConfigureNetwork:  configureNetwork,
-		ConfigureNetworks: configureNetworks,
-		MoreCreateArgs:    moreCreateArgs,
-		ContainerName:     containerName,
-		Isolation:         isolation,
+		Options:          options,
+		Spec:             spec,
+		RootPath:         rootPath,
+		BundlePath:       bundlePath,
+		ConfigureNetwork: configureNetwork,
+		MoreCreateArgs:   moreCreateArgs,
+		ContainerName:    containerName,
+		Isolation:        isolation,
 	})
 	if conferr != nil {
 		return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
 	}
 	cmd := reexec.Command(runUsingRuntimeCommand)
+	setPdeathsig(cmd)
 	cmd.Dir = bundlePath
 	cmd.Stdin = options.Stdin
 	if cmd.Stdin == nil {
@@ -2212,14 +2278,102 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 		}
 		confwg.Done()
 	}()
+
+	// create network configuration pipes
+	var containerCreateR, containerCreateW fileCloser
+	var containerStartR, containerStartW fileCloser
+	if configureNetwork {
+		containerCreateR.file, containerCreateW.file, err = os.Pipe()
+		if err != nil {
+			return errors.Wrapf(err, "error creating container create pipe")
+		}
+		defer containerCreateR.Close()
+		defer containerCreateW.Close()
+
+		containerStartR.file, containerStartW.file, err = os.Pipe()
+		if err != nil {
+			return errors.Wrapf(err, "error creating container start pipe")
+		}
+		defer containerStartR.Close()
+		defer containerStartW.Close()
+		cmd.ExtraFiles = []*os.File{containerCreateW.file, containerStartR.file}
+	}
+
+	cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
 	defer preader.Close()
 	defer pwriter.Close()
-	err = cmd.Run()
-	if err != nil {
-		err = errors.Wrapf(err, "error while running runtime")
+	if err := cmd.Start(); err != nil {
+		return errors.Wrapf(err, "error while starting runtime")
+	}
+
+	interrupted := make(chan os.Signal, 100)
+	go func() {
+		for receivedSignal := range interrupted {
+			if err := cmd.Process.Signal(receivedSignal); err != nil {
+				logrus.Infof("%v while attempting to forward %v to child process", err, receivedSignal)
+			}
+		}
+	}()
+	signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+
+	if configureNetwork {
+		// we already passed the fd to the child, now close the writer so we do not hang if the child closes it
+		containerCreateW.Close()
+		if err := waitForSync(containerCreateR.file); err != nil {
+			// we do not want to return here since we want to capture the exit code from the child via cmd.Wait()
+			// close the pipes here so that the child will not hang forever
+			containerCreateR.Close()
+			containerStartW.Close()
+			logrus.Errorf("did not get container create message from subprocess: %v", err)
+		} else {
+			pidFile := filepath.Join(bundlePath, "pid")
+			pidValue, err := ioutil.ReadFile(pidFile)
+			if err != nil {
+				return err
+			}
+			pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
+			if err != nil {
+				return errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue))
+			}
+
+			teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
+			if teardown != nil {
+				defer teardown()
+			}
+			if err != nil {
+				return err
+			}
+
+			// only add hosts if we manage the hosts file
+			if hostsFile != "" {
+				var entries etchosts.HostEntries
+				if netstatus != nil {
+					entries = etchosts.GetNetworkHostEntries(netstatus, spec.Hostname, buildContainerName)
+				} else {
+					// we have slirp4netns, default to the slirp4netns ip since this is not configurable in buildah
+					entries = etchosts.HostEntries{{IP: "10.0.2.100", Names: []string{spec.Hostname, buildContainerName}}}
+				}
+				// make sure to sync this with (b *Builder) generateHosts()
+				err = etchosts.Add(hostsFile, entries)
+				if err != nil {
+					return err
+				}
+			}
+
+			logrus.Debug("network namespace successfully setup, send start message to child")
+			_, err = containerStartW.file.Write([]byte{1})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if err := cmd.Wait(); err != nil {
+		return errors.Wrapf(err, "error while running runtime")
 	}
 	confwg.Wait()
+	signal.Stop(interrupted)
+	close(interrupted)
 	if err == nil {
 		return conferr
 	}
@@ -2229,37 +2383,43 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
 	return err
 }
 
+// fileCloser is a helper struct to prevent closing the file twice in the code.
+// users must call (fileCloser).Close() and not fileCloser.File.Close()
+type fileCloser struct {
+	file   *os.File
+	closed bool
+}
+
+func (f *fileCloser) Close() {
+	if !f.closed {
+		if err := f.file.Close(); err != nil {
+			logrus.Errorf("failed to close file: %v", err)
+		}
+		f.closed = true
+	}
+}
+
+// waitForSync waits for a maximum of 4 minutes to read something from the file
+func waitForSync(pipeR *os.File) error {
+	if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil {
+		return err
+	}
+	b := make([]byte, 16)
+	_, err := pipeR.Read(b)
+	return err
+}
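The parent/child handshake added here is two pipes: the child signals "created", the parent sets up networking and /etc/hosts, then signals "start". waitForSync bounds the read with a deadline so a dead child cannot hang the build; the mechanism, stdlib only (this relies on pipes supporting deadlines, which holds on Linux):

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForByte reads a single sync byte with a timeout, like waitForSync.
func waitForByte(r *os.File, timeout time.Duration) error {
	if err := r.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return err
	}
	buf := make([]byte, 1)
	_, err := r.Read(buf)
	return err
}

func main() {
	r, w, _ := os.Pipe()
	go func() {
		time.Sleep(50 * time.Millisecond)
		w.Write([]byte{1}) // child: "container created"
		w.Close()
	}()
	fmt.Println(waitForByte(r, 4*time.Minute)) // <nil>
}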
 func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error {
 	switch isolation {
 	case IsolationOCIRootless:
-		if ns := options.NamespaceOptions.Find(string(specs.IPCNamespace)); ns == nil || ns.Host {
-			logrus.Debugf("Forcing use of an IPC namespace.")
-		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.IPCNamespace)})
-		_, err := exec.LookPath("slirp4netns")
-		hostNetworking := err != nil
-		networkNamespacePath := ""
-		if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
-			hostNetworking = ns.Host
-			networkNamespacePath = ns.Path
-			if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
-				logrus.Debugf("Disabling network namespace configuration.")
-				networkNamespacePath = ""
-			}
-		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{
-			Name: string(specs.NetworkNamespace),
-			Host: hostNetworking,
-			Path: networkNamespacePath,
-		})
-		if ns := options.NamespaceOptions.Find(string(specs.PIDNamespace)); ns == nil || ns.Host {
-			logrus.Debugf("Forcing use of a PID namespace.")
-		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.PIDNamespace), Host: false})
-		if ns := options.NamespaceOptions.Find(string(specs.UserNamespace)); ns == nil || ns.Host {
-			logrus.Debugf("Forcing use of a user namespace.")
+		// only change the netns if the caller did not set it
+		if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns == nil {
+			if _, err := exec.LookPath("slirp4netns"); err != nil {
+				// if slirp4netns is not installed we have to use the host's net namespace
+				options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.NetworkNamespace), Host: true})
+			}
 		}
-		options.NamespaceOptions.AddOrReplace(define.NamespaceOption{Name: string(specs.UserNamespace)})
+		fallthrough
 	case IsolationOCI:
 		pidns := options.NamespaceOptions.Find(string(specs.PIDNamespace))
 		userns := options.NamespaceOptions.Find(string(specs.UserNamespace))
@@ -2273,27 +2433,18 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp
 
 // DefaultNamespaceOptions returns the default namespace settings from the
 // runtime-tools generator library.
 func DefaultNamespaceOptions() (define.NamespaceOptions, error) {
-	options := define.NamespaceOptions{
-		{Name: string(specs.CgroupNamespace), Host: true},
-		{Name: string(specs.IPCNamespace), Host: true},
-		{Name: string(specs.MountNamespace), Host: true},
-		{Name: string(specs.NetworkNamespace), Host: true},
-		{Name: string(specs.PIDNamespace), Host: true},
-		{Name: string(specs.UserNamespace), Host: true},
-		{Name: string(specs.UTSNamespace), Host: true},
-	}
-	g, err := generate.New("linux")
+	cfg, err := config.Default()
 	if err != nil {
-		return options, errors.Wrapf(err, "error generating new 'linux' runtime spec")
+		return nil, errors.Wrapf(err, "failed to get container config")
 	}
-	spec := g.Config
-	if spec.Linux != nil {
-		for _, ns := range spec.Linux.Namespaces {
-			options.AddOrReplace(define.NamespaceOption{
-				Name: string(ns.Type),
-				Path: ns.Path,
-			})
-		}
+	options := define.NamespaceOptions{
+		{Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"},
+		{Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"},
+		{Name: string(specs.MountNamespace), Host: false},
+		{Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"},
+		{Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"},
+		{Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "host"},
+		{Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"},
 	}
 	return options, nil
 }
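DefaultNamespaceOptions now derives its defaults from containers.conf instead of a generated runtime spec, so buildah follows the same host/private knobs as podman. The mapping is plain string comparison per namespace; a sketch with a stand-in for the config accessors (the real getters are cfg.NetNS() and friends, as in the hunk above):

package main

import "fmt"

// conf stands in for the containers.conf accessors (cfg.NetNS() etc.).
type conf struct{ netns, ipcns, utsns string }

type nsOption struct {
	Name string
	Host bool
}

// defaults maps each containers.conf value to a Host flag: anything other
// than the literal "host" means a private namespace.
func defaults(c conf) []nsOption {
	return []nsOption{
		{Name: "network", Host: c.netns == "host"},
		{Name: "ipc", Host: c.ipcns == "host"},
		{Name: "uts", Host: c.utsns == "host"},
	}
}

func main() {
	fmt.Println(defaults(conf{netns: "private", ipcns: "host", utsns: "private"}))
	// [{network false} {ipc true} {uts false}]
}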
func DefaultNamespaceOptions() (define.NamespaceOptions, error) { - options := define.NamespaceOptions{ - {Name: string(specs.CgroupNamespace), Host: true}, - {Name: string(specs.IPCNamespace), Host: true}, - {Name: string(specs.MountNamespace), Host: true}, - {Name: string(specs.NetworkNamespace), Host: true}, - {Name: string(specs.PIDNamespace), Host: true}, - {Name: string(specs.UserNamespace), Host: true}, - {Name: string(specs.UTSNamespace), Host: true}, - } - g, err := generate.New("linux") + cfg, err := config.Default() if err != nil { - return options, errors.Wrapf(err, "error generating new 'linux' runtime spec") + return nil, errors.Wrapf(err, "failed to get container config") } - spec := g.Config - if spec.Linux != nil { - for _, ns := range spec.Linux.Namespaces { - options.AddOrReplace(define.NamespaceOption{ - Name: string(ns.Type), - Path: ns.Path, - }) - } + options := define.NamespaceOptions{ + {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"}, + {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"}, + {Name: string(specs.MountNamespace), Host: false}, + {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host"}, + {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"}, + {Name: string(specs.UserNamespace), Host: cfg.Containers.UserNS == "host"}, + {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"}, } return options, nil } @@ -2308,15 +2459,14 @@ func contains(volumes []string, v string) bool { } type runUsingRuntimeSubprocOptions struct { - Options RunOptions - Spec *specs.Spec - RootPath string - BundlePath string - ConfigureNetwork bool - ConfigureNetworks []string - MoreCreateArgs []string - ContainerName string - Isolation define.Isolation + Options RunOptions + Spec *specs.Spec + RootPath string + BundlePath string + ConfigureNetwork bool + MoreCreateArgs []string + ContainerName string + Isolation define.Isolation } func init() { @@ -2324,13 +2474,16 @@ func init() { } // runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs -func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources map[string]*sshagent.Source, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) ([]spec.Mount, *runMountArtifacts, error) { +func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []string, secrets map[string]define.Secret, stageMountPoints map[string]internal.StageMountDetails, sshSources map[string]*sshagent.Source, containerWorkingDir string, contextDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, rootUID int, rootGID int, processUID int, processGID int) ([]spec.Mount, *runMountArtifacts, error) { mountTargets := make([]string, 0, 10) + tmpFiles := make([]string, 0, len(mounts)) + mountImages := make([]string, 0, 10) finalMounts := make([]specs.Mount, 0, len(mounts)) agents := make([]*sshagent.AgentServer, 0, len(mounts)) sshCount := 0 defaultSSHSock := "" tokens := []string{} + lockedTargets := []string{} for _, mount := range mounts { arr := strings.SplitN(mount, ",", 2) @@ -2344,17 +2497,19 @@ func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources ma // For now, we only support type secret. 
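// Editorial note (not part of the upstream patch): the "we only support type
// secret" comment above predates this change — the switch below now also
// handles "ssh", "bind", "tmpfs" and "cache" mounts, mirroring BuildKit's
// RUN --mount syntax, e.g. in a Containerfile:
//
//	RUN --mount=type=cache,target=/var/cache/dnf dnf -y install gcc
//	RUN --mount=type=secret,id=token cat /run/secrets/token
//
// The default secret target /run/secrets/<id> used on the second line matches
// the fallback in getSecretMount further down.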
switch kv[1] { case "secret": - mount, err := getSecretMount(tokens, secrets, mountlabel, containerWorkingDir, uidmap, gidmap) + mount, envFile, err := getSecretMount(tokens, secrets, b.MountLabel, containerWorkingDir, uidmap, gidmap) if err != nil { return nil, nil, err } if mount != nil { finalMounts = append(finalMounts, *mount) mountTargets = append(mountTargets, mount.Destination) - + if envFile != "" { + tmpFiles = append(tmpFiles, envFile) + } } case "ssh": - mount, agent, err := getSSHMount(tokens, sshCount, sshSources, mountlabel, uidmap, gidmap, processLabel) + mount, agent, err := b.getSSHMount(tokens, sshCount, sshSources, b.MountLabel, uidmap, gidmap, b.ProcessLabel) if err != nil { return nil, nil, err } @@ -2368,28 +2523,102 @@ func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources ma // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} sshCount++ } + case "bind": + mount, image, err := b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + // only perform cleanup if an image was mounted; ignore everything else + if image != "" { + mountImages = append(mountImages, image) + } + case "tmpfs": + mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + case "cache": + mount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + lockedTargets = lockedPaths default: return nil, nil, errors.Errorf("invalid mount type %q", kv[1]) } } artifacts := &runMountArtifacts{ RunMountTargets: mountTargets, + TmpFiles: tmpFiles, Agents: agents, + MountedImages: mountImages, SSHAuthSock: defaultSSHSock, + LockedTargets: lockedTargets, } return finalMounts, artifacts, nil } -func getSecretMount(tokens []string, secrets map[string]string, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, error) { +func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, string, error) { + if contextDir == "" { + return nil, "", errors.New("context directory for the current run invocation is not configured") + } + var optionMounts []specs.Mount + mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, image, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, image, err + } + return &volumes[0], image, nil +} + +func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) { + var optionMounts []specs.Mount + mount, err := internalParse.GetTmpfsMount(tokens) + if err != nil { + return nil, err + } + optionMounts = append(optionMounts, mount) + volumes, err :=
b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, err + } + return &volumes[0], nil +} + +func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, []string, error) { + var optionMounts []specs.Mount + mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, lockedTargets, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, lockedTargets, err + } + return &volumes[0], lockedTargets, nil +} + +func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) { errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") if len(tokens) == 0 { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } var err error var id, target string var required bool var uid, gid uint32 - var mode uint32 = 400 + var mode uint32 = 0400 for _, val := range tokens { kv := strings.SplitN(val, "=", 2) switch kv[0] { @@ -2400,76 +2629,94 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin case "required": required, err = strconv.ParseBool(kv[1]) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } case "mode": mode64, err := strconv.ParseUint(kv[1], 8, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } mode = uint32(mode64) case "uid": uid64, err := strconv.ParseUint(kv[1], 10, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } uid = uint32(uid64) case "gid": gid64, err := strconv.ParseUint(kv[1], 10, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } gid = uint32(gid64) default: - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } } if id == "" { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } // Default location for secretis is /run/secrets/id if target == "" { target = "/run/secrets/" + id } - src, ok := secrets[id] + secr, ok := secrets[id] if !ok { if required { - return nil, errors.Errorf("secret required but no secret with id %s found", id) + return nil, "", errors.Errorf("secret required but no secret with id %s found", id) } - return nil, nil + return nil, "", nil } + var data []byte + var envFile string + var ctrFileOnHost string - // Copy secrets to container working dir, since we need to chmod, chown and relabel it - // for the container user and we don't want to mess with the original file - ctrFileOnHost := filepath.Join(containerWorkingDir, "secrets", id) - _, err = os.Stat(ctrFileOnHost) - if os.IsNotExist(err) { - data, err := ioutil.ReadFile(src) + switch secr.SourceType { + case "env": + data = []byte(os.Getenv(secr.Source)) + tmpFile, err := ioutil.TempFile("/dev/shm", "buildah*") if err != nil { - return nil, err + return nil, "", err } - if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0644); err != nil { - return nil, err + envFile = tmpFile.Name() + ctrFileOnHost = tmpFile.Name() + case "file": + data, err = ioutil.ReadFile(secr.Source) + if err != nil { + return 
nil, "", err } - if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { - return nil, err + ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id) + _, err = os.Stat(ctrFileOnHost) + if !os.IsNotExist(err) { + return nil, "", err } + default: + return nil, "", errors.New("invalid source secret type") + } + + // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod, + // chown and relabel it for the container user and we don't want to mess with the original file + if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil { + return nil, "", err + } + if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { + return nil, "", err } if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil { - return nil, err + return nil, "", err } hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) if err != nil { - return nil, err + return nil, "", err } if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil { - return nil, err + return nil, "", err } if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil { - return nil, err + return nil, "", err } newMount := specs.Mount{ Destination: target, @@ -2477,11 +2724,11 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin Source: ctrFileOnHost, Options: []string{"bind", "rprivate", "ro"}, } - return &newMount, nil + return &newMount, envFile, nil } // getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container -func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) { +func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) { errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") var err error @@ -2524,7 +2771,6 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou gid = uint32(gid64) default: return nil, nil, errInvalidSyntax - } } @@ -2556,13 +2802,13 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou if err := label.Relabel(filepath.Dir(hostSock), mountlabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := label.Relabel(hostSock, mountlabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } @@ -2570,19 +2816,19 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) if err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := os.Lchown(hostSock, int(hostUID), 
int(hostGID)); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } @@ -2596,7 +2842,7 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou } // cleanupRunMounts cleans up run mounts so they only appear in this run. -func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { +func (b *Builder) cleanupRunMounts(context *imagetypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error { for _, agent := range artifacts.Agents { err := agent.Shutdown() if err != nil { @@ -2604,6 +2850,25 @@ func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { } } + // clean up any mounted images for this run + for _, image := range artifacts.MountedImages { + if image != "" { + // if we get here, some image was mounted for this run + i, err := internalUtil.LookupImage(context, b.store, image) + if err == nil { + // silently try to unmount and do nothing + // if image is being used by something else + _ = i.Unmount(false) + continue + } + if errors.Cause(err) == storagetypes.ErrImageUnknown { + // Ignore only if ErrImageUnknown + // Reason: image is already unmounted, do nothing + continue + } + return err + } + } + opts := copier.RemoveOptions{ All: true, } @@ -2613,5 +2878,72 @@ func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { return err } } - return nil + var prevErr error + for _, path := range artifacts.TmpFiles { + err := os.Remove(path) + if !os.IsNotExist(err) { + if prevErr != nil { + logrus.Error(prevErr) + } + prevErr = err + } + } + // unlock any locked files from this RUN statement + for _, path := range artifacts.LockedTargets { + _, err := os.Stat(path) + if err != nil { + // Lockfile not found; this might be a problem, + // since LockedTargets must contain the list of all locked files + // don't break here since we need to unlock other files but + // log so user can take a look + logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err) + continue + } + lockfile, err := lockfile.GetLockfile(path) + if err != nil { + // unable to get lockfile + // let's log the error and continue + // unlocking other files + logrus.Warn(err) + continue + } + if lockfile.Locked() { + lockfile.Unlock() + } else { + logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path) + continue + } + } + return prevErr } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + conf, err := config.Default() + if err != nil { + return nil, err + } + // copy the config to not modify the default by accident + newconf := *conf + if len(cniConfDir) > 0 { + newconf.Network.NetworkConfigDir = cniConfDir + } + if len(cniPluginPath) > 0 { + plugins := strings.Split(cniPluginPath, string(os.PathListSeparator)) + newconf.Network.CNIPluginDirs = plugins + } + + _, netInt, err := network.NetworkBackend(store, &newconf, false) + if err != nil { + return nil, err + } + return netInt, nil +} + +// setPdeathsig sets a parent-death signal for the
process +func setPdeathsig(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL } diff --git a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go index 86f7c748292..9e62691e854 100644 --- a/vendor/github.com/containers/buildah/run_unix.go +++ b/vendor/github.com/containers/buildah/run_unix.go @@ -4,6 +4,8 @@ package buildah import ( "github.com/containers/buildah/define" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" "github.com/pkg/errors" ) @@ -22,3 +24,8 @@ func (b *Builder) Run(command []string, options RunOptions) error { func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go index 2a9029e5e1c..f0640ffe2ab 100644 --- a/vendor/github.com/containers/buildah/run_unsupported.go +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -3,6 +3,8 @@ package buildah import ( + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" "github.com/pkg/errors" ) @@ -18,3 +20,8 @@ func (b *Builder) Run(command []string, options RunOptions) error { func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go index 00903203e0a..83fc867a2fe 100644 --- a/vendor/github.com/containers/buildah/selinux.go +++ b/vendor/github.com/containers/buildah/selinux.go @@ -1,10 +1,15 @@ +//go:build linux // +build linux package buildah import ( + "fmt" + "os" + "github.com/opencontainers/runtime-tools/generate" selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/pkg/errors" ) func selinuxGetEnabled() bool { @@ -17,3 +22,21 @@ func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { g.SetLinuxMountLabel(mountLabel) } } + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + if !selinuxGetEnabled() || processLabel == "" || mountLabel == "" { + // SELinux is completely disabled, or we're not doing anything at all with labeling + return nil + } + pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file") + if err != nil { + return errors.Wrapf(err, "computing file creation context for pipes") + } + for i := range stdioPipe { + pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0]) + if err := selinux.SetFileLabel(pipeFdName, pipeContext); err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "setting file label on %q", pipeFdName) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go index 
2646148376b..5b1948315a4 100644 --- a/vendor/github.com/containers/buildah/selinux_unsupported.go +++ b/vendor/github.com/containers/buildah/selinux_unsupported.go @@ -12,3 +12,7 @@ func selinuxGetEnabled() bool { func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { } + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + return nil +} diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 47c9ac5cd96..9bfa9d268ea 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -123,8 +123,8 @@ func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) { // isReferenceSomething checks if the registry part of a reference is insecure or blocked func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, what func(string, *types.SystemContext) (bool, error)) (bool, error) { - if ref != nil && ref.DockerReference() != nil { - if named, ok := ref.DockerReference().(reference.Named); ok { + if ref != nil { + if named := ref.DockerReference(); named != nil { if domain := reference.Domain(named); domain != "" { return what(domain, sc) } diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go index 3a23fd69038..12546dbd5ce 100644 --- a/vendor/github.com/containers/buildah/util/types.go +++ b/vendor/github.com/containers/buildah/util/types.go @@ -7,10 +7,6 @@ import ( const ( // DefaultRuntime if containers.conf fails. DefaultRuntime = define.DefaultRuntime - // DefaultCNIPluginPath is the default location of CNI plugin helpers. - DefaultCNIPluginPath = define.DefaultCNIPluginPath - // DefaultCNIConfigDir is the default location of CNI configuration files. - DefaultCNIConfigDir = define.DefaultCNIConfigDir ) var ( diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index c6f022d4ed7..33a8c5657c5 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -16,7 +16,6 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" - "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" @@ -47,9 +46,8 @@ var ( // resolveName checks if name is a valid image name, and if that name doesn't // include a domain portion, returns a list of the names which it might -// correspond to in the set of configured registries, the transport used to -// pull the image, and a boolean which is true iff -// 1) the list of search registries was used, and 2) it was empty. +// correspond to in the set of configured registries, and the transport used to +// pull the image. // // The returned image names never include a transport: prefix, and if transport != "", // (transport, image) should be a valid input to alltransports.ParseImageName. 
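// Editorial note (not part of the upstream patch): the hunk below removes both
// the unqualified-search-registries probing and the trailing bool ("search
// registries were used and empty") from resolveName; short names are now
// resolved purely locally via shortnames.ResolveLocally. Callers later in this
// diff change accordingly, from
//
//	nameList, _, _, err := resolveName(n, systemContext, store)
//
// to
//
//	nameList, _, err := resolveName(n, systemContext, store)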
@@ -58,9 +56,9 @@ var ( // // NOTE: The "list of search registries is empty" check does not count blocked registries, // and neither the implied "localhost" nor a possible firstRegistry are counted -func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) { +func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, error) { if name == "" { - return nil, "", false, nil + return nil, "", nil } // Maybe it's a truncated image ID. Don't prepend a registry name, then. @@ -68,7 +66,7 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) { // It's a truncated version of the ID of an image that's present in local storage; // we need only expand the ID. - return []string{img.ID}, "", false, nil + return []string{img.ID}, "", nil } } // If we're referring to an image by digest, it *must* be local and we @@ -76,51 +74,32 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s if strings.HasPrefix(name, "sha256:") { d, err := digest.Parse(name) if err != nil { - return nil, "", false, err + return nil, "", err } img, err := store.Image(d.Encoded()) if err != nil { - return nil, "", false, err + return nil, "", err } - return []string{img.ID}, "", false, nil + return []string{img.ID}, "", nil } // Transports are not supported for local image look ups. srcRef, err := alltransports.ParseImageName(name) if err == nil { - return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), false, nil + return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), nil } - // Figure out the list of registries. - var registries []string - searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc) - if err != nil { - logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) - searchRegistries = nil - } - for _, registry := range searchRegistries { - reg, err := sysregistriesv2.FindRegistry(sc, registry) - if err != nil { - logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err) - continue - } - if reg == nil || !reg.Blocked { - registries = append(registries, registry) - } - } - searchRegistriesAreEmpty := len(registries) == 0 - var candidates []string // Local short-name resolution. 
namedCandidates, err := shortnames.ResolveLocally(sc, name) if err != nil { - return nil, "", false, err + return nil, "", err } for _, named := range namedCandidates { candidates = append(candidates, named.String()) } - return candidates, DefaultTransport, searchRegistriesAreEmpty, nil + return candidates, DefaultTransport, nil } // ExpandNames takes unqualified names, parses them as image names, and returns @@ -131,7 +110,7 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora expanded := make([]string, 0, len(names)) for _, n := range names { var name reference.Named - nameList, _, _, err := resolveName(n, systemContext, store) + nameList, _, err := resolveName(n, systemContext, store) if err != nil { return nil, errors.Wrapf(err, "error parsing name %q", n) } @@ -182,7 +161,7 @@ func ResolveNameToReferences( systemContext *types.SystemContext, image string, ) (refs []types.ImageReference, err error) { - names, transport, _, err := resolveName(image, systemContext, store) + names, transport, err := resolveName(image, systemContext, store) if err != nil { return nil, errors.Wrapf(err, "error parsing name %q", image) } diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 4f5c7d0a16d..01cedc7ed49 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/containers/common/libimage/manifests" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/retry" "github.com/containers/image/v5/copy" @@ -26,8 +27,10 @@ const ( ) // LookupReferenceFunc return an image reference based on the specified one. -// This can be used to pass custom blob caches to the copy operation. -type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error) +// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. +type LookupReferenceFunc = manifests.LookupReferenceFunc // CopyOptions allow for customizing image-copy operations. type CopyOptions struct { @@ -144,15 +147,13 @@ type copier struct { destinationLookup LookupReferenceFunc } -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} // getDockerAuthConfig extracts a docker auth config from the CopyOptions. Returns // nil if no credentials are set. 
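// Editorial note (not part of the upstream patch): LookupReferenceFunc above is
// now a type alias (note the "="), so libimage and
// manifests.LookupReferenceFunc values stay interchangeable — which the new
// PushOptions.SourceFilter in manifests.go (later in this diff) relies on. A
// hypothetical identity filter; pushOptions is illustrative only:
//
//	var f libimage.LookupReferenceFunc = func(ref types.ImageReference) (types.ImageReference, error) {
//		return ref, nil // pass the reference through unchanged
//	}
//	pushOptions.SourceFilter = f // accepted: alias, not a distinct type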
@@ -218,15 +219,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags - if options.Architecture != "" { - c.systemContext.ArchitectureChoice = options.Architecture - } - if options.OS != "" { - c.systemContext.OSChoice = options.OS - } - if options.Variant != "" { - c.systemContext.VariantChoice = options.Variant - } + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) if options.SignaturePolicyPath != "" { c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath diff --git a/vendor/github.com/containers/common/libimage/download.go b/vendor/github.com/containers/common/libimage/download.go deleted file mode 100644 index 54edf1b9a4f..00000000000 --- a/vendor/github.com/containers/common/libimage/download.go +++ /dev/null @@ -1,46 +0,0 @@ -package libimage - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - - "github.com/pkg/errors" -) - -// tmpdir returns a path to a temporary directory. -func tmpdir() string { - tmpdir := os.Getenv("TMPDIR") - if tmpdir == "" { - tmpdir = "/var/tmp" - } - - return tmpdir -} - -// downloadFromURL downloads an image in the format "https:/example.com/myimage.tar" -// and temporarily saves in it $TMPDIR/importxyz, which is deleted after the image is imported -func (r *Runtime) downloadFromURL(source string) (string, error) { - fmt.Printf("Downloading from %q\n", source) - - outFile, err := ioutil.TempFile(r.systemContext.BigFilesTemporaryDir, "import") - if err != nil { - return "", errors.Wrap(err, "error creating file") - } - defer outFile.Close() - - response, err := http.Get(source) // nolint:noctx - if err != nil { - return "", errors.Wrapf(err, "error downloading %q", source) - } - defer response.Body.Close() - - _, err = io.Copy(outFile, response.Body) - if err != nil { - return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name()) - } - - return outFile.Name(), nil -} diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 833f940cca6..f9f73f527ef 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -3,13 +3,14 @@ package libimage import ( "context" "fmt" - "path/filepath" + "path" "strconv" "strings" "time" filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" + "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,26 +19,53 @@ import ( // indicates that the image matches the criteria. type filterFunc func(*Image) (bool, error) +// Apply the specified filters. At least one filter of each key must apply. +func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) { + matches := false + for key := range filters { // and + matches = false + for _, filter := range filters[key] { // or + var err error + matches, err = filter(i) + if err != nil { + // Some images may have been corrupted in the + // meantime, so do an extra check and make the + // error non-fatal (see containers/podman/issues/12582). 
+ if errCorrupted := i.isCorrupted(""); errCorrupted != nil { + logrus.Errorf(errCorrupted.Error()) + return false, nil + } + return false, err + } + if matches { + break + } + } + if !matches { + return false, nil + } + } + return matches, nil +} + // filterImages returns a slice of images which are passing all specified // filters. -func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) { - if len(filters) == 0 { +func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *ListImagesOptions) ([]*Image, error) { + if len(options.Filters) == 0 || len(images) == 0 { return images, nil } + + filters, err := r.compileImageFilters(ctx, options) + if err != nil { + return nil, err + } result := []*Image{} for i := range images { - include := true - var err error - for _, filter := range filters { - include, err = filter(images[i]) - if err != nil { - return nil, err - } - if !include { - break - } + match, err := images[i].applyFilters(filters) + if err != nil { + return nil, err } - if include { + if match { result = append(result, images[i]) } } @@ -47,15 +75,35 @@ func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) { // compileImageFilters creates `filterFunc`s for the specified filters. The // required format is `key=value` with the following supported keys: // after, since, before, containers, dangling, id, label, readonly, reference, intermediate -func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) ([]filterFunc, error) { +func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) { logrus.Tracef("Parsing image filters %s", options.Filters) - filterFuncs := []filterFunc{} - for _, filter := range options.Filters { + var tree *layerTree + getTree := func() (*layerTree, error) { + if tree == nil { + t, err := r.layerTree() + if err != nil { + return nil, err + } + tree = t + } + return tree, nil + } + + filters := map[string][]filterFunc{} + duplicate := map[string]string{} + for _, f := range options.Filters { var key, value string - split := strings.SplitN(filter, "=", 2) - if len(split) != 2 { - return nil, errors.Errorf("invalid image filter %q: must be in the format %q", filter, "filter=value") + var filter filterFunc + negate := false + split := strings.SplitN(f, "!=", 2) + if len(split) == 2 { + negate = true + } else { + split = strings.SplitN(f, "=", 2) + if len(split) != 2 { + return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value or filter!=value") + } } key = split[0] @@ -63,99 +111,197 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp switch key { case "after", "since": - img, _, err := r.LookupImage(value, nil) + img, err := r.time(key, value) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter %q", filter) + return nil, err } - filterFuncs = append(filterFuncs, filterAfter(img.Created())) + key = "since" + filter = filterAfter(img.Created()) case "before": - img, _, err := r.LookupImage(value, nil) + img, err := r.time(key, value) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter %q", filter) + return nil, err } - filterFuncs = append(filterFuncs, filterBefore(img.Created())) + filter = filterBefore(img.Created()) case "containers": - switch value { - case "false", "true": - case "external": - if options.IsExternalContainerFunc == nil { - return nil, fmt.Errorf("libimage error: 
external containers filter without callback") - } - default: - return nil, fmt.Errorf("unsupported value %q for containers filter", value) + if err := r.containers(duplicate, key, value, options.IsExternalContainerFunc); err != nil { + return nil, err } - filterFuncs = append(filterFuncs, filterContainers(value, options.IsExternalContainerFunc)) + filter = filterContainers(value, options.IsExternalContainerFunc) case "dangling": - dangling, err := strconv.ParseBool(value) + dangling, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for dangling filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterDangling(ctx, dangling)) + t, err := getTree() + if err != nil { + return nil, err + } + + filter = filterDangling(ctx, dangling, t) case "id": - filterFuncs = append(filterFuncs, filterID(value)) + filter = filterID(value) case "intermediate": - intermediate, err := strconv.ParseBool(value) + intermediate, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + t, err := getTree() if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for intermediate filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterIntermediate(ctx, intermediate)) + + filter = filterIntermediate(ctx, intermediate, t) case "label": - filterFuncs = append(filterFuncs, filterLabel(ctx, value)) + filter = filterLabel(ctx, value) case "readonly": - readOnly, err := strconv.ParseBool(value) + readOnly, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for readonly filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterReadOnly(readOnly)) - - case "reference": - filterFuncs = append(filterFuncs, filterReference(value)) + filter = filterReadOnly(readOnly) - case "until": - ts, err := timetype.GetTimestamp(value, time.Now()) + case "manifest": + manifest, err := r.bool(duplicate, key, value) if err != nil { return nil, err } - seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + filter = filterManifest(ctx, manifest) + + case "reference": + filter = filterReferences(value) + + case "until": + until, err := r.until(value) if err != nil { return nil, err } - until := time.Unix(seconds, nanoseconds) - filterFuncs = append(filterFuncs, filterBefore(until)) + filter = filterBefore(until) default: return nil, errors.Errorf("unsupported image filter %q", key) } + if negate { + filter = negateFilter(filter) + } + filters[key] = append(filters[key], filter) + } + + return filters, nil +} + +func negateFilter(f filterFunc) filterFunc { + return func(img *Image) (bool, error) { + b, err := f(img) + return !b, err + } +} + +func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error { + if exists, ok := duplicate[key]; ok && exists != value { + return errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + switch value { + case "false", "true": + case "external": + if externalFunc == nil { + return fmt.Errorf("libimage error: external containers filter without callback") + } + default: + return fmt.Errorf("unsupported value %q for containers filter", value) + } + return nil +} + +func (r *Runtime) until(value string) (time.Time, error) { + var until time.Time + ts, err := timetype.GetTimestamp(value, time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := 
timetype.ParseTimestamps(ts, 0) + if err != nil { + return until, err + } + return time.Unix(seconds, nanoseconds), nil +} + +func (r *Runtime) time(key, value string) (*Image, error) { + img, _, err := r.LookupImage(value, nil) + if err != nil { + return nil, errors.Wrapf(err, "could not find local image for filter %q=%q", key, value) } + return img, nil +} - return filterFuncs, nil +func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) { + if exists, ok := duplicate[key]; ok && exists != value { + return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + set, err := strconv.ParseBool(value) + if err != nil { + return false, errors.Wrapf(err, "non-boolean value %q for %s filter", value, key) + } + return set, nil } -// filterReference creates a reference filter for matching the specified value. -func filterReference(value string) filterFunc { - // Replacing all '/' with '|' so that filepath.Match() can work '|' - // character is not valid in image name, so this is safe. - // - // TODO: this has been copied from Podman and requires some more review - // and especially tests. - filter := fmt.Sprintf("*%s*", value) - filter = strings.ReplaceAll(filter, "/", "|") +// filterManifest filters whether or not the image is a manifest list +func filterManifest(ctx context.Context, value bool) filterFunc { return func(img *Image) (bool, error) { - if len(value) < 1 { - return true, nil + isManifestList, err := img.IsManifestList(ctx) + if err != nil { + return false, err } - for _, name := range img.Names() { - newName := strings.ReplaceAll(name, "/", "|") - match, _ := filepath.Match(filter, newName) - if match { - return true, nil + return isManifestList == value, nil + } +} + +// filterReferences creates a reference filter for matching the specified value. +func filterReferences(value string) filterFunc { + return func(img *Image) (bool, error) { + refs, err := img.NamesReferences() + if err != nil { + return false, err + } + + for _, ref := range refs { + refString := ref.String() // FQN with tag/digest + candidates := []string{refString} + + // Split the reference into 3 components (twice if digested/tagged): + // 1) Fully-qualified reference + // 2) Without domain + // 3) Without domain and path + if named, isNamed := ref.(reference.Named); isNamed { + candidates = append(candidates, + reference.Path(named), // path/name without tag/digest (Path() removes it) + refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest + + trimmedString := reference.TrimNamed(named).String() + if refString != trimmedString { + tagOrDigest := refString[len(trimmedString):] + candidates = append(candidates, + trimmedString, // FQN without tag/digest + reference.Path(named)+tagOrDigest, // path/name with tag/digest + trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest + } + } + + for _, candidate := range candidates { + // path.Match() is also used by Docker's reference.FamiliarMatch(). + matched, _ := path.Match(value, candidate) + if matched { + return true, nil + } } } return false, nil @@ -221,9 +367,9 @@ func filterContainers(value string, fn IsExternalContainerFunc) filterFunc { } // filterDangling creates a dangling filter for matching the specified value.
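// Editorial note (not part of the upstream patch): filterReferences above
// expands every image name into its "familiar" candidate forms and matches
// each against the filter with path.Match. For a hypothetical image named
// docker.io/library/ubuntu:22.04 the candidates are:
//
//	docker.io/library/ubuntu:22.04 // full reference
//	library/ubuntu                 // path without domain or tag
//	ubuntu:22.04                   // bare name with tag
//	docker.io/library/ubuntu       // full reference without tag
//	library/ubuntu:22.04           // path with tag
//	ubuntu                         // bare name
//
// so a filter such as reference=ubuntu* matches via
// path.Match("ubuntu*", "ubuntu:22.04").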
-func filterDangling(ctx context.Context, value bool) filterFunc { +func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc { return func(img *Image) (bool, error) { - isDangling, err := img.IsDangling(ctx) + isDangling, err := img.isDangling(ctx, tree) if err != nil { return false, err } @@ -241,9 +387,9 @@ func filterID(value string) filterFunc { // filterIntermediate creates an intermediate filter for images. An image is // considered to be an intermediate image if it is dangling (i.e., no tags) and // has no children (i.e., no other image depends on it). -func filterIntermediate(ctx context.Context, value bool) filterFunc { +func filterIntermediate(ctx context.Context, value bool, tree *layerTree) filterFunc { return func(img *Image) (bool, error) { - isIntermediate, err := img.IsIntermediate(ctx) + isIntermediate, err := img.isIntermediate(ctx, tree) if err != nil { return false, err } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 00a2d620ef4..661ca159b48 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -44,6 +44,8 @@ type Image struct { completeInspectData *ImageData // Corresponding OCI image. ociv1Image *ociv1.Image + // Names() parsed into references. + namesReferences []reference.Reference } } @@ -59,6 +61,7 @@ func (i *Image) reload() error { i.cached.partialInspectData = nil i.cached.completeInspectData = nil i.cached.ociv1Image = nil + i.cached.namesReferences = nil return nil } @@ -89,6 +92,23 @@ func (i *Image) Names() []string { return i.storageImage.Names } +// NamesReferences returns Names() as references. +func (i *Image) NamesReferences() ([]reference.Reference, error) { + if i.cached.namesReferences != nil { + return i.cached.namesReferences, nil + } + refs := make([]reference.Reference, 0, len(i.Names())) + for _, name := range i.Names() { + ref, err := reference.Parse(name) + if err != nil { + return nil, err + } + refs = append(refs, ref) + } + i.cached.namesReferences = refs + return refs, nil +} + // StorageImage returns the underlying storage.Image. func (i *Image) StorageImage() *storage.Image { return i.storageImage @@ -128,10 +148,16 @@ func (i *Image) IsReadOnly() bool { // IsDangling returns true if the image is dangling, that is an untagged image // without children. func (i *Image) IsDangling(ctx context.Context) (bool, error) { + return i.isDangling(ctx, nil) +} + +// isDangling returns true if the image is dangling, that is an untagged image +// without children. If tree is nil, it will be created for this invocation only. +func (i *Image) isDangling(ctx context.Context, tree *layerTree) (bool, error) { if len(i.Names()) > 0 { return false, nil } - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, tree) if err != nil { return false, err } @@ -141,10 +167,17 @@ func (i *Image) IsDangling(ctx context.Context) (bool, error) { // IsIntermediate returns true if the image is an intermediate image, that is // an untagged image with children. func (i *Image) IsIntermediate(ctx context.Context) (bool, error) { + return i.isIntermediate(ctx, nil) +} + +// isIntermediate returns true if the image is an intermediate image, that is +// an untagged image with children. If tree is nil, it will be created for this +// invocation only.
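// Editorial note (not part of the upstream patch): isDangling and
// isIntermediate (below) take an optional *layerTree so that a single image
// listing builds the layer tree once instead of once per image. The
// memoization added to compileImageFilters earlier in this diff looks like:
//
//	var tree *layerTree
//	getTree := func() (*layerTree, error) {
//		if tree == nil {
//			t, err := r.layerTree()
//			if err != nil {
//				return nil, err
//			}
//			tree = t
//		}
//		return tree, nil
//	}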
+func (i *Image) isIntermediate(ctx context.Context, tree *layerTree) (bool, error) { if len(i.Names()) > 0 { return false, nil } - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, tree) if err != nil { return false, err } @@ -189,7 +222,7 @@ func (i *Image) Parent(ctx context.Context) (*Image, error) { // HasChildren returns indicates if the image has children. func (i *Image) HasChildren(ctx context.Context) (bool, error) { - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, nil) if err != nil { return false, err } @@ -198,7 +231,7 @@ func (i *Image) HasChildren(ctx context.Context) (bool, error) { // Children returns the image's children. func (i *Image) Children(ctx context.Context) ([]*Image, error) { - children, err := i.getChildren(ctx, true) + children, err := i.getChildren(ctx, true, nil) if err != nil { return nil, err } @@ -206,13 +239,16 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) { } // getChildren returns a list of imageIDs that depend on the image. If all is -// false, only the first child image is returned. -func (i *Image) getChildren(ctx context.Context, all bool) ([]*Image, error) { - tree, err := i.runtime.layerTree() - if err != nil { - return nil, err +// false, only the first child image is returned. If tree is nil, it will be +// created for this invocation only. +func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) { + if tree == nil { + t, err := i.runtime.layerTree() + if err != nil { + return nil, err + } + tree = t } - return tree.children(ctx, i, all) } @@ -608,8 +644,7 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) { } // inRepoTags looks for the specified name/tag pair in the image's repo tags. -// Note that tag may be empty. -func (i *Image) inRepoTags(name, tag string) (reference.Named, error) { +func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) { repoTags, err := i.NamedRepoTags() if err != nil { return nil, err @@ -620,8 +655,10 @@ func (i *Image) inRepoTags(name, tag string) (reference.Named, error) { return nil, err } + name := namedTagged.Name() + tag := namedTagged.Tag() for _, pair := range pairs { - if tag != "" && tag != pair.Tag { + if tag != pair.Tag { continue } if !strings.HasSuffix(pair.Name, name) { diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 14020244050..683a2dc9806 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -95,9 +95,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // For now: we only support key=value // We will attempt to strip quotation marks if present. 
- var ( - key, val string - ) + var key, val string splitEnv := strings.SplitN(value, "=", 2) key = splitEnv[0] diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index bcfb4e1295f..3db392784e9 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -2,9 +2,11 @@ package libimage import ( "context" + "fmt" "net/url" "os" + "github.com/containers/common/pkg/download" storageTransport "github.com/containers/image/v5/storage" tarballTransport "github.com/containers/image/v5/tarball" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -47,21 +49,23 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption ic = config.ImageConfig } - hist := []v1.History{ + history := []v1.History{ {Comment: options.CommitMessage}, } config := v1.Image{ Config: ic, - History: hist, + History: history, OS: options.OS, Architecture: options.Arch, + Variant: options.Variant, } u, err := url.ParseRequestURI(path) if err == nil && u.Scheme != "" { // If source is a URL, download the file. - file, err := r.downloadFromURL(path) + fmt.Printf("Downloading from %q\n", path) + file, err := download.FromURL(r.systemContext.BigFilesTemporaryDir, path) if err != nil { return "", err } diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index a872e5cf9d1..05d60edfc1e 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -50,19 +50,39 @@ type RootFS struct { Layers []digest.Digest `json:"Layers"` } -// Inspect inspects the image. Use `withSize` to also perform the -// comparatively expensive size computation of the image. -func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) { +// InspectOptions allow for customizing inspecting images. +type InspectOptions struct { + // Compute the size of the image (expensive). + WithSize bool + // Compute the parent of the image (expensive). + WithParent bool +} + +// Inspect inspects the image. 
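// Editorial note (not part of the upstream patch): Inspect (below) replaces
// the old withSize bool with *InspectOptions, making the expensive size and
// parent lookups opt-in. A hypothetical call:
//
//	data, err := img.Inspect(ctx, &libimage.InspectOptions{WithSize: true})
//	if err != nil {
//		return err
//	}
//	fmt.Println(data.Size) // stays -1 unless WithSize is set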
+func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageData, error) { logrus.Debugf("Inspecting image %s", i.ID()) + if options == nil { + options = &InspectOptions{} + } + if i.cached.completeInspectData != nil { - if withSize && i.cached.completeInspectData.Size == int64(-1) { + if options.WithSize && i.cached.completeInspectData.Size == int64(-1) { size, err := i.Size() if err != nil { return nil, err } i.cached.completeInspectData.Size = size } + if options.WithParent && i.cached.completeInspectData.Parent == "" { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + i.cached.completeInspectData.Parent = parentImage.ID() + } + } return i.cached.completeInspectData, nil } @@ -75,10 +95,7 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) if err != nil { return nil, err } - parentImage, err := i.Parent(ctx) - if err != nil { - return nil, err - } + repoTags, err := i.RepoTags() if err != nil { return nil, err @@ -93,7 +110,7 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) } size := int64(-1) - if withSize { + if options.WithSize { size, err = i.Size() if err != nil { return nil, err @@ -124,8 +141,14 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) NamesHistory: i.NamesHistory(), } - if parentImage != nil { - data.Parent = parentImage.ID() + if options.WithParent { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + data.Parent = parentImage.ID() + } } // Determine the format of the image. How we determine certain data @@ -164,7 +187,12 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) return nil, err } data.Comment = dockerManifest.Comment + // NOTE: Health checks may be listed in the container config or + // the config. 
data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck + if data.HealthCheck == nil { + data.HealthCheck = dockerManifest.Config.Healthcheck + } } if data.Annotations == nil { @@ -185,7 +213,6 @@ func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error ref, err := i.StorageReference() if err != nil { - return nil, err } diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 74a1870e0ff..4dfac710654 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -35,17 +35,6 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( var loadErrors []error for _, f := range []func() ([]string, string, error){ - // DOCKER-ARCHIVE - must be first (see containers/podman/issues/10809) - func() ([]string, string, error) { - logrus.Debugf("-> Attempting to load %q as a Docker archive", path) - ref, err := dockerArchiveTransport.ParseReference(path) - if err != nil { - return nil, dockerArchiveTransport.Transport.Name(), err - } - images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) - return images, dockerArchiveTransport.Transport.Name(), err - }, - // OCI func() ([]string, string, error) { logrus.Debugf("-> Attempting to load %q as an OCI directory", path) @@ -68,6 +57,17 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( return images, ociArchiveTransport.Transport.Name(), err }, + // DOCKER-ARCHIVE + func() ([]string, string, error) { + logrus.Debugf("-> Attempting to load %q as a Docker archive", path) + ref, err := dockerArchiveTransport.ParseReference(path) + if err != nil { + return nil, dockerArchiveTransport.Transport.Name(), err + } + images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) + return images, dockerArchiveTransport.Transport.Name(), err + }, + // DIR func() ([]string, string, error) { logrus.Debugf("-> Attempting to load %q as a Docker dir", path) diff --git a/vendor/github.com/containers/common/libimage/manifests/copy.go b/vendor/github.com/containers/common/libimage/manifests/copy.go index 7e651a46c02..578b64ca838 100644 --- a/vendor/github.com/containers/common/libimage/manifests/copy.go +++ b/vendor/github.com/containers/common/libimage/manifests/copy.go @@ -4,12 +4,10 @@ import ( "github.com/containers/image/v5/signature" ) -var ( - // storageAllowedPolicyScopes overrides the policy for local storage - // to ensure that we can read images from it. - storageAllowedPolicyScopes = signature.PolicyTransportScopes{ - "": []signature.PolicyRequirement{ - signature.NewPRInsecureAcceptAnything(), - }, - } -) +// storageAllowedPolicyScopes overrides the policy for local storage +// to ensure that we can read images from it. +var storageAllowedPolicyScopes = signature.PolicyTransportScopes{ + "": []signature.PolicyRequirement{ + signature.NewPRInsecureAcceptAnything(), + }, +} diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 8d1abfba9a3..2624dee785a 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -27,6 +27,12 @@ import ( const instancesData = "instances.json" +// LookupReferenceFunc returns an image reference based on the specified one.
+// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. +type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error) + // ErrListImageUnknown is returned when we attempt to create an image reference // for a List that has not yet been saved to an image. var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list") @@ -57,6 +63,7 @@ type PushOptions struct { SignBy string // fingerprint of GPG key to use to sign images RemoveSignatures bool // true to discard signatures in images ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 + SourceFilter LookupReferenceFunc // filter the list source } // Create creates a new list containing information about the specified image, @@ -221,6 +228,11 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push if err != nil { return nil, "", err } + if options.SourceFilter != nil { + if src, err = options.SourceFilter(src); err != nil { + return nil, "", err + } + } copyOptions := &cp.Options{ ImageListSelection: options.ImageListSelection, Instances: options.Instances, @@ -353,9 +365,12 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } if instanceInfo.OS == "" { instanceInfo.OS = config.OS + instanceInfo.OSVersion = config.OSVersion + instanceInfo.OSFeatures = config.OSFeatures } if instanceInfo.Architecture == "" { instanceInfo.Architecture = config.Architecture + instanceInfo.Variant = config.Variant } } manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) @@ -369,10 +384,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } instanceInfo.instanceDigest = &manifestDigest instanceInfo.Size = int64(len(manifestBytes)) - } else { - if manifestDigest == "" { - manifestDigest = *instanceInfo.instanceDigest - } + } else if manifestDigest == "" { + manifestDigest = *instanceInfo.instanceDigest } err = l.List.AddInstance(*instanceInfo.instanceDigest, instanceInfo.Size, manifestType, instanceInfo.OS, instanceInfo.Architecture, instanceInfo.OSVersion, instanceInfo.OSFeatures, instanceInfo.Variant, instanceInfo.Features, instanceInfo.Annotations) if err != nil { @@ -390,9 +403,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag func (l *list) Remove(instanceDigest digest.Digest) error { err := l.List.Remove(instanceDigest) if err == nil { - if _, needToDelete := l.instances[instanceDigest]; needToDelete { - delete(l.instances, instanceDigest) - } + delete(l.instances, instanceDigest) } return err } diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index bfea807c8b0..7ceb6283063 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,13 +1,51 @@ package libimage import ( + "runtime" "strings" + "github.com/containerd/containerd/platforms" "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +// NormalizePlatform normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will not be normalized. +func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { + os, arch, variant = rawOS, rawArch, rawVariant + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + rawPlatform := os + "/" + arch + if variant != "" { + rawPlatform += "/" + variant + } + + normalizedPlatform, err := platforms.Parse(rawPlatform) + if err != nil { + logrus.Debugf("Error normalizing platform: %v", err) + return rawOS, rawArch, rawVariant + } + logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) + os = rawOS + if rawOS != "" { + os = normalizedPlatform.OS + } + arch = rawArch + if rawArch != "" { + arch = normalizedPlatform.Architecture + } + variant = rawVariant + if rawVariant != "" { + variant = normalizedPlatform.Variant + } + return os, arch, variant +}
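One subtlety of NormalizePlatform worth spelling out: empty inputs are padded with runtime.GOOS/GOARCH only for the platforms.Parse call and are then returned unchanged, so "unset" stays unset for the caller. A rough sketch of expected results (exact outputs track containerd's platforms package; a Linux host is assumed for the first call):

os, arch, variant := libimage.NormalizePlatform("", "x86_64", "")
// os == "" (the raw value was empty, so it is passed through),
// arch == "amd64" ("x86_64" is rewritten), variant == ""

os, arch, variant = libimage.NormalizePlatform("linux", "aarch64", "")
// os == "linux", arch == "arm64", variant == ""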
+ // NormalizeName normalizes the provided name according to the conventions by // Podman and Buildah. If tag and digest are missing, the "latest" tag will be // used. If it's a short name, it will be prefixed with "localhost/". diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 1c322c37e84..ff93b6ed888 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "runtime" "strings" "time" @@ -21,6 +22,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" + ociSpec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -169,6 +171,20 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP return localImages, pullError } +// nameFromAnnotations returns a reference string to be used as an image name, +// or an empty string. The annotations map may be nil. +func nameFromAnnotations(annotations map[string]string) string { + if annotations == nil { + return "" + } + // buildkit/containerd are using a custom annotation, see + // containers/podman/issues/12560. + if annotations["io.containerd.image.name"] != "" { + return annotations["io.containerd.image.name"] + } + return annotations[ociSpec.AnnotationRefName] +} + // copyFromDefault is the default copier for a number of transports. Other // transports require some specific dancing, sometimes Yoga. func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { @@ -201,15 +217,16 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, err } - // if index.json has no reference name, compute the image ID instead - if manifestDescriptor.Annotations == nil || manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] == "" { + storageName = nameFromAnnotations(manifestDescriptor.Annotations) + switch len(storageName) { + case 0: + // If there's no reference name in the annotations, compute an ID.
storageName, err = getImageID(ctx, ref, nil) if err != nil { return nil, err } imageName = "sha256:" + storageName[1:] - } else { - storageName = manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] + default: named, err := NormalizeName(storageName) if err != nil { return nil, err @@ -227,8 +244,14 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, imageName = named.String() default: - storageName = toLocalImageName(ref.StringWithinTransport()) - imageName = storageName + // Path-based transports (e.g., dir) may include invalid + // characters, so we should pessimistically generate an ID + // instead of looking at the StringWithinTransport(). + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] } // Create a storage reference. @@ -424,12 +447,18 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str // If there's already a local image "localhost/foo", then we should // attempt pulling that instead of doing the full short-name dance. // - // NOTE: we must ignore the platform of a local image when doing - // lookups here, even if arch/os/variant is set. Some images set an - // incorrect or even invalid platform (see containers/podman/issues/10682). - // Doing the lookup while ignoring the platform checks prevents - // redundantly downloading the same image. - localImage, resolvedImageName, err = r.LookupImage(imageName, nil) + // NOTE that we only do platform checks if the specified values differ + // from the local platform. Unfortunately, there are many images used + // in the wild which don't set the correct value(s) in the config + // causing various issues such as containers/podman/issues/10682. + lookupImageOptions := &LookupImageOptions{Variant: options.Variant} + if options.Architecture != runtime.GOARCH { + lookupImageOptions.Architecture = options.Architecture + } + if options.OS != runtime.GOOS { + lookupImageOptions.OS = options.OS + } + localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions) if err != nil && errors.Cause(err) != storage.ErrImageUnknown { logrus.Errorf("Looking up %s in local storage: %v", imageName, err) } @@ -442,45 +471,26 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str } } - customPlatform := false - if len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 { - customPlatform = true - // Unless the pull policy is "always", we must pessimistically assume - // that the local image has an invalid architecture (see - // containers/podman/issues/10682). Hence, whenever the user requests - // a custom platform, set the pull policy to "always" to make sure - // we're pulling down the image. + customPlatform := len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 + if customPlatform && pullPolicy != config.PullPolicyAlways && pullPolicy != config.PullPolicyNever { + // Unless the pull policy is always/never, we must + // pessimistically assume that the local image has an invalid + // architecture (see containers/podman/issues/10682). Hence, + // whenever the user requests a custom platform, set the pull + // policy to "newer" to make sure we're pulling down the + // correct image. // - // NOTE that this is will even override --pull={false,never}. This is - // very likely a bug but a consistent one in Podman/Buildah and should - // be addressed at a later point. 
- if pullPolicy != config.PullPolicyAlways { - switch { - // User input clearly refer to a local image. - case strings.HasPrefix(imageName, "localhost/"): - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "never", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyNever - - // Image resolved to a local one, so let's still have a - // look at the registries or aliases but use it - // otherwise. - case strings.HasPrefix(resolvedImageName, "localhost/"): - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "newer", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyNewer - - default: - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "always", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyAlways - } - } + // NOTE that this will even override --pull={false,never}. + pullPolicy = config.PullPolicyNewer + logrus.Debugf("Enforcing pull policy to %q to pull custom platform (arch: %q, os: %q, variant: %q) - local image may mistakenly specify wrong platform", pullPolicy, options.Architecture, options.OS, options.Variant) } if pullPolicy == config.PullPolicyNever { if localImage != nil { - logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) + logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) return []string{resolvedImageName}, nil } - logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) + logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) return nil, errors.Wrap(storage.ErrImageUnknown, imageName) } @@ -518,6 +528,12 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str sys := r.systemContextCopy() resolved, err := shortnames.Resolve(sys, imageName) if err != nil { + // TODO: that is too big a hammer since we should only + // ignore errors that indicate that there's no alias and no + // USRs. Must be addressed in c/image first. + if localImage != nil && pullPolicy == config.PullPolicyNewer { + return []string{resolvedImageName}, nil + } return nil, err }
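The rewrite above collapses the old three-way policy override into a single rule: when a custom platform is requested and the policy is neither "always" nor "never", it is bumped to "newer", so a local image with a wrongly recorded platform is re-checked against the registry rather than unconditionally re-pulled. A sketch of a call site (assumed: an initialized *libimage.Runtime rt, a ctx, and imports of fmt, github.com/containers/common/libimage and github.com/containers/common/pkg/config; the image name is an example):

opts := &libimage.PullOptions{}
opts.Architecture = "arm64" // differs from the host, so "missing" below is silently upgraded to "newer"
images, err := rt.Pull(ctx, "docker.io/library/alpine:latest", config.PullPolicyMissing, opts)
if err != nil {
	return err
}
fmt.Println("pulled", len(images), "image(s)")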
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 7f25df20053..974b50b50bb 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -21,6 +21,16 @@ import ( // Faster than the standard library, see https://github.com/json-iterator/go. var json = jsoniter.ConfigCompatibleWithStandardLibrary +// tmpdir returns a path to a temporary directory. +func tmpdir() string { + tmpdir := os.Getenv("TMPDIR") + if tmpdir == "" { + tmpdir = "/var/tmp" + } + + return tmpdir +} + // RuntimeOptions allow for creating a customized Runtime. type RuntimeOptions struct { // The base system context of the runtime which will be used throughout @@ -64,7 +74,7 @@ func (r *Runtime) SystemContext() *types.SystemContext { // Returns a copy of the runtime's system context. func (r *Runtime) systemContextCopy() *types.SystemContext { var sys types.SystemContext - deepcopy.Copy(&sys, &r.systemContext) + _ = deepcopy.Copy(&sys, &r.systemContext) return &sys } @@ -180,6 +190,8 @@ type LookupImageOptions struct { returnManifestIfNoInstance bool } +var errNoHexValue = errors.New("invalid format: no 64-byte hexadecimal value") + // Lookup Image looks up `name` in the local container storage. Returns the // image and the name it has been found with. Note that name may also use the // `containers-storage:` prefix used to refer to the containers-storage @@ -223,13 +235,29 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, name = normalizedName } + byDigest := false originalName := name - idByDigest := false if strings.HasPrefix(name, "sha256:") { - // Strip off the sha256 prefix so it can be parsed later on. - idByDigest = true + byDigest = true name = strings.TrimPrefix(name, "sha256:") } + byFullID := reference.IsFullIdentifier(name) + + if byDigest && !byFullID { + return nil, "", fmt.Errorf("%s: %v", originalName, errNoHexValue) + } + + // If the name clearly refers to a local image, try to look it up. + if byFullID || byDigest { + img, err := r.lookupImageInLocalStorage(originalName, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, originalName, nil + } + return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + } // Unless specified, set the platform specified in the system context // for later platform matching. Builder likes to set these things via @@ -243,28 +271,14 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, if options.Variant == "" { options.Variant = r.systemContext.VariantChoice } - - // First, check if we have an exact match in the storage. Maybe an ID - // or a fully-qualified image name. - img, err := r.lookupImageInLocalStorage(name, name, options) - if err != nil { - return nil, "", err - } - if img != nil { - return img, originalName, nil - } - - // If the name clearly referred to a local image, there's nothing we can - // do anymore. - if storageRef != nil || idByDigest { - return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) - } + // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). + options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) // Second, try out the candidates as resolved by shortnames. This takes // "localhost/" prefixed images into account as well. candidates, err := shortnames.ResolveLocally(&r.systemContext, name) if err != nil { - return nil, "", errors.Wrap(storage.ErrImageUnknown, originalName) + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) } // Backwards compat: normalize to docker.io as some users may very well // rely on that. @@ -282,7 +296,17 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, } } - return r.lookupImageInDigestsAndRepoTags(originalName, options) + // The specified name may refer to a short ID. Note that this *must* + // happen after the short-name expansion as done above.
+ img, err := r.lookupImageInLocalStorage(name, name, options) + if err != nil { + return nil, "", err + } + if img != nil { + return img, name, err + } + + return r.lookupImageInDigestsAndRepoTags(name, options) } // lookupImageInLocalStorage looks up the specified candidate for name in the @@ -379,41 +403,49 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm return nil, "", err } - if !shortnames.IsShortName(name) { - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, "", err - } - digested, hasDigest := named.(reference.Digested) - if !hasDigest { - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) - } + ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed + if err != nil { + return nil, "", err + } + named, isNamed := ref.(reference.Named) + if !isNamed { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + digested, isDigested := named.(reference.Digested) + if isDigested { logrus.Debug("Looking for image with matching recorded digests") digest := digested.Digest() for _, image := range allImages { for _, d := range image.Digests() { - if d == digest { - return image, name, nil + if d != digest { + continue + } + // Also make sure that the matching image fits all criteria (e.g., manifest list). + if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil { + return nil, "", err } + return image, name, nil + } } + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + if !shortnames.IsShortName(name) { return nil, "", errors.Wrap(storage.ErrImageUnknown, name) } - // Podman compat: if we're looking for a short name but couldn't - // resolve it via the registries.conf dance, we need to look at *all* - // images and check if the name we're looking for matches a repo tag. - // Split the name into a repo/tag pair - split := strings.SplitN(name, ":", 2) - repo := split[0] - tag := "" - if len(split) == 2 { - tag = split[1] + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + namedTagged, isNamedTagged := named.(reference.NamedTagged) + if !isNamedTagged { + // NOTE: this should never happen since we already know it's + // not a digested reference.
+ return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown) } + for _, image := range allImages { - named, err := image.inRepoTags(repo, tag) + named, err := image.inRepoTags(namedTagged) if err != nil { return nil, "", err } @@ -477,13 +509,16 @@ func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options } if options.Architecture != "" && options.Architecture != data.Architecture { - return false, err + logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref) + return false, nil } if options.OS != "" && options.OS != data.Os { - return false, err + logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref) + return false, nil } if options.Variant != "" && options.Variant != data.Variant { - return false, err + logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref) + return false, nil } return true, nil @@ -539,16 +574,7 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI } } - var filters []filterFunc - if len(options.Filters) > 0 { - compiledFilters, err := r.compileImageFilters(ctx, options) - if err != nil { - return nil, err - } - filters = append(filters, compiledFilters...) - } - - return filterImages(images, filters) + return r.filterImages(ctx, images, options) } // RemoveImagesOptions allow for customizing image removal. @@ -566,6 +592,8 @@ type RemoveImagesOptions struct { // containers using a specific image. By default, all containers in // the local containers storage will be removed (if Force is set). RemoveContainerFunc RemoveContainerFunc + // Ignore if a specified image does not exist and do not throw an error. + Ignore bool // IsExternalContainerFunc allows for checking whether the specified // container is an external one (when containers=external filter is // used). The definition of an external container can be set by @@ -651,6 +679,9 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem for _, name := range names { img, resolvedName, err := r.LookupImage(name, lookupOptions) if err != nil { + if options.Ignore && errors.Is(err, storage.ErrImageUnknown) { + continue + } appendError(err) continue } diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index e1b8c3f75ba..fed86d4efce 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -68,7 +68,6 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, } return errors.Errorf("unsupported format %q for saving images", format) - } // saveSingleImage saves the specified image name to the specified path. diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index df29bc7da7a..33a4776ce3f 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -58,6 +58,10 @@ type SearchOptions struct { InsecureSkipTLSVerify types.OptionalBool // ListTags returns the search result with available tags ListTags bool + // Registries to search if the specified term does not include a + // registry. If set, the unqualified-search registries in + // containers-registries.conf(5) are ignored. + Registries []string } // SearchFilter allows filtering images while searching. 
@@ -105,6 +109,10 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) { return sFilter, nil } +// Search searches for term. If term includes a registry, only this registry will +// be used for searching. Otherwise, the unqualified-search registries in +// containers-registries.conf(5) or the ones specified in the options will be +// used. func (r *Runtime) Search(ctx context.Context, term string, options *SearchOptions) ([]SearchResult, error) { if options == nil { options = &SearchOptions{} } @@ -117,10 +125,14 @@ func (r *Runtime) Search(ctx context.Context, term string, options *SearchOption // that we cannot use the reference parser from the containers/image // library as the search term may contain arbitrary input such as // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629. - if spl := strings.SplitN(term, "/", 2); len(spl) > 1 { - searchRegistries = append(searchRegistries, spl[0]) + spl := strings.SplitN(term, "/", 2) + switch { + case len(spl) > 1: + searchRegistries = []string{spl[0]} term = spl[1] - } else { + case len(options.Registries) > 0: + searchRegistries = options.Registries + default: regs, err := sysregistriesv2.UnqualifiedSearchRegistries(r.systemContextCopy()) if err != nil { return nil, err @@ -244,7 +256,7 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri name = index + "/library/" + results[i].Name } params := SearchResult{ - Index: index, + Index: registry, Name: name, Description: description, Official: official, @@ -284,8 +296,9 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr paramsArr := []SearchResult{} for i := 0; i < limit; i++ { params := SearchResult{ - Name: imageRef.DockerReference().Name(), - Tag: tags[i], + Name: imageRef.DockerReference().Name(), + Tag: tags[i], + Index: registry, } paramsArr = append(paramsArr, params) } diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/README.md b/vendor/github.com/containers/common/libnetwork/cni/README.md similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/network/cni/README.md rename to vendor/github.com/containers/common/libnetwork/cni/README.md diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go similarity index 56% rename from vendor/github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go rename to vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go index 93d871767d6..bda7ed7d02f 100644 --- a/vendor/github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cni @@ -10,15 +11,16 @@ import ( "path/filepath" "strconv" "strings" - "syscall" "time" "github.com/containernetworking/cni/libcni" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/libpod/network/util" - pkgutil "github.com/containers/podman/v3/pkg/util" + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + pkgutil "github.com/containers/common/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) { @@ -43,12 +45,11 @@ func
createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str } } - f, err := os.Stat(confPath) + t, err := fileTime(confPath) if err != nil { return nil, err } - stat := f.Sys().(*syscall.Stat_t) - network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)) + network.Created = t firstPlugin := conf.Plugins[0] network.Driver = firstPlugin.Network.Type @@ -75,7 +76,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str network.Options["vlan"] = strconv.Itoa(bridge.Vlan) } - err = convertIPAMConfToNetwork(&network, bridge.IPAM, confPath) + err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath) if err != nil { return nil, err } @@ -97,13 +98,13 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str network.Options["mode"] = vlan.Mode } - err = convertIPAMConfToNetwork(&network, vlan.IPAM, confPath) + err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath) if err != nil { return nil, err } default: - // A warning would be good but users would get this warning everytime so keep this at info level. + // A warning would be good but users would get this warning every time so keep this at info level. logrus.Infof("Unsupported CNI config type %s in %s, this network can still be used but inspect or list cannot show all information", firstPlugin.Network.Type, confPath) } @@ -125,74 +126,77 @@ func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool { // convertIPAMConfToNetwork converts A cni IPAMConfig to libpod network subnets. // It returns an array of subnets and an extra bool if dhcp is configured. -func convertIPAMConfToNetwork(network *types.Network, ipam ipamConfig, confPath string) error { - if ipam.PluginType == types.DHCPIPAMDriver { - network.IPAMOptions["driver"] = types.DHCPIPAMDriver - return nil - } - - if ipam.PluginType != types.HostLocalIPAMDriver { - return errors.Errorf("unsupported ipam plugin %s in %s", ipam.PluginType, confPath) - } - - network.IPAMOptions["driver"] = types.HostLocalIPAMDriver - for _, r := range ipam.Ranges { - for _, ipam := range r { - s := types.Subnet{} - - // Do not use types.ParseCIDR() because we want the ip to be - // the network address and not a random ip in the sub. - _, sub, err := net.ParseCIDR(ipam.Subnet) - if err != nil { - return err - } - s.Subnet = types.IPNet{IPNet: *sub} - - // gateway - var gateway net.IP - if ipam.Gateway != "" { - gateway = net.ParseIP(ipam.Gateway) - if gateway == nil { - return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway) +func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath string) error { + switch ipam.PluginType { + case "": + network.IPAMOptions[types.Driver] = types.NoneIPAMDriver + case types.DHCPIPAMDriver: + network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver + case types.HostLocalIPAMDriver: + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + for _, r := range ipam.Ranges { + for _, ipam := range r { + s := types.Subnet{} + + // Do not use types.ParseCIDR() because we want the ip to be + // the network address and not a random ip in the sub. 
+ _, sub, err := net.ParseCIDR(ipam.Subnet) + if err != nil { + return err } - s.Subnet = types.IPNet{IPNet: *sub} - - // gateway - var gateway net.IP - if ipam.Gateway != "" { - gateway = net.ParseIP(ipam.Gateway) - if gateway == nil { - return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway) + s.Subnet = types.IPNet{IPNet: *sub} + + // gateway + var gateway net.IP + if ipam.Gateway != "" { + gateway = net.ParseIP(ipam.Gateway) + if gateway == nil { + return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway) + } + // convert to 4 byte if ipv4 + util.NormalizeIP(&gateway) + } else if !network.Internal { + // only add a gateway address if the network is not internal + gateway, err = util.FirstIPInSubnet(sub) + if err != nil { + return errors.Errorf("failed to get first ip in subnet %s", sub.String()) + } } - // convert to 4 byte if ipv4 - ipv4 := gateway.To4() - if ipv4 != nil { - gateway = ipv4 + s.Gateway = gateway + + var rangeStart net.IP + var rangeEnd net.IP + if ipam.RangeStart != "" { + rangeStart = net.ParseIP(ipam.RangeStart) + if rangeStart == nil { + return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart) + } } - } else if !network.Internal { - // only add a gateway address if the network is not internal - gateway, err = util.FirstIPInSubnet(sub) - if err != nil { - return errors.Errorf("failed to get first ip in subnet %s", sub.String()) + if ipam.RangeEnd != "" { + rangeEnd = net.ParseIP(ipam.RangeEnd) + if rangeEnd == nil { + return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd) + } } - } - s.Gateway = gateway - - var rangeStart net.IP - var rangeEnd net.IP - if ipam.RangeStart != "" { - rangeStart = net.ParseIP(ipam.RangeStart) - if rangeStart == nil { - return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart) + if rangeStart != nil || rangeEnd != nil { + s.LeaseRange = &types.LeaseRange{} + s.LeaseRange.StartIP = rangeStart + s.LeaseRange.EndIP = rangeEnd } - if ipam.RangeEnd != "" { - rangeEnd = net.ParseIP(ipam.RangeEnd) - if rangeEnd == nil { - return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd) + if util.IsIPv6(s.Subnet.IP) { + network.IPv6Enabled = true + } + network.Subnets = append(network.Subnets, s) } - if rangeStart != nil || rangeEnd != nil { - s.LeaseRange = &types.LeaseRange{} - s.LeaseRange.StartIP = rangeStart - s.LeaseRange.EndIP = rangeEnd - } - if util.IsIPv6(s.Subnet.IP) { - network.IPv6Enabled = true - } - network.Subnets = append(network.Subnets, s) } + default: + // This is not an error. While we only support certain ipam drivers, we + // cannot make it fail for unsupported ones. CNI is still able to use them, + // just our translation logic cannot convert this into a Network. + // For the same reason this is not a warning, it would just be annoying for + // everyone using an unknown ipam driver.
+ logrus.Infof("unsupported ipam plugin %q in %s", ipam.PluginType, confPath) + network.IPAMOptions[types.Driver] = ipam.PluginType } return nil } @@ -220,59 +224,55 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ var ( routes []ipamRoute ipamRanges [][]ipamLocalHostRangeConf - ipamConf ipamConfig + ipamConf *ipamConfig err error ) - if len(network.Subnets) > 0 { + + ipamDriver := network.IPAMOptions[types.Driver] + switch ipamDriver { + case types.HostLocalIPAMDriver: + defIpv4Route := false + defIpv6Route := false for _, subnet := range network.Subnets { - route, err := newIPAMDefaultRoute(util.IsIPv6(subnet.Subnet.IP)) - if err != nil { - return nil, "", err - } - routes = append(routes, route) ipam := newIPAMLocalHostRange(subnet.Subnet, subnet.LeaseRange, subnet.Gateway) ipamRanges = append(ipamRanges, []ipamLocalHostRangeConf{*ipam}) - } - ipamConf = newIPAMHostLocalConf(routes, ipamRanges) - } else { - ipamConf = ipamConfig{PluginType: "dhcp"} - } - vlan := 0 - mtu := 0 - vlanPluginMode := "" - for k, v := range network.Options { - switch k { - case "mtu": - mtu, err = parseMTU(v) - if err != nil { - return nil, "", err - } - - case "vlan": - vlan, err = parseVlan(v) - if err != nil { - return nil, "", err - } + // only add default route for not internal networks + if !network.Internal { + ipv6 := util.IsIPv6(subnet.Subnet.IP) + if !ipv6 && defIpv4Route { + continue + } + if ipv6 && defIpv6Route { + continue + } - case "mode": - switch network.Driver { - case types.MacVLANNetworkDriver: - if !pkgutil.StringInSlice(v, []string{"", "bridge", "private", "vepa", "passthru"}) { - return nil, "", errors.Errorf("unknown macvlan mode %q", v) + if ipv6 { + defIpv6Route = true + } else { + defIpv4Route = true } - case types.IPVLANNetworkDriver: - if !pkgutil.StringInSlice(v, []string{"", "l2", "l3", "l3s"}) { - return nil, "", errors.Errorf("unknown ipvlan mode %q", v) + route, err := newIPAMDefaultRoute(ipv6) + if err != nil { + return nil, "", err } - default: - return nil, "", errors.Errorf("cannot set option \"mode\" with driver %q", network.Driver) + routes = append(routes, route) } - vlanPluginMode = v - - default: - return nil, "", errors.Errorf("unsupported network option %s", k) } + conf := newIPAMHostLocalConf(routes, ipamRanges) + ipamConf = &conf + case types.DHCPIPAMDriver: + ipamConf = &ipamConfig{PluginType: "dhcp"} + + case types.NoneIPAMDriver: + // do nothing + default: + return nil, "", errors.Errorf("unsupported ipam driver %q", ipamDriver) + } + + opts, err := parseOptions(network.Options, network.Driver) + if err != nil { + return nil, "", err } isGateway := true @@ -290,23 +290,19 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ switch network.Driver { case types.BridgeNetworkDriver: - bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, mtu, vlan, ipamConf) + bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, opts.mtu, opts.vlan, ipamConf) plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin()) // if we find the dnsname plugin we add configuration for it if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled { // Note: in the future we might like to allow for dynamic domain names plugins = append(plugins, newDNSNamePlugin(defaultPodmanDomainName)) } - // Add the podman-machine CNI plugin if we are in a machine - if n.isMachine { - plugins = append(plugins, newPodmanMachinePlugin()) - } case 
types.MacVLANNetworkDriver: - plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, ipamConf)) + plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf)) case types.IPVLANNetworkDriver: - plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, ipamConf)) + plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, opts.vlanPluginMode, opts.mtu, ipamConf)) default: return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver) @@ -319,16 +315,15 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ cniPathName := "" if writeToDisk { cniPathName = filepath.Join(n.cniConfigDir, network.Name+".conflist") - err = ioutil.WriteFile(cniPathName, b, 0644) + err = ioutil.WriteFile(cniPathName, b, 0o644) if err != nil { return nil, "", err } - f, err := os.Stat(cniPathName) + t, err := fileTime(cniPathName) if err != nil { return nil, "", err } - stat := f.Sys().(*syscall.Stat_t) - network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)) + network.Created = t } else { network.Created = time.Now() } @@ -339,36 +334,6 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ return config, cniPathName, nil } -// parseMTU parses the mtu option -func parseMTU(mtu string) (int, error) { - if mtu == "" { - return 0, nil // default - } - m, err := strconv.Atoi(mtu) - if err != nil { - return 0, err - } - if m < 0 { - return 0, errors.Errorf("mtu %d is less than zero", m) - } - return m, nil -} - -// parseVlan parses the vlan option -func parseVlan(vlan string) (int, error) { - if vlan == "" { - return 0, nil // default - } - v, err := strconv.Atoi(vlan) - if err != nil { - return 0, err - } - if v < 0 || v > 4094 { - return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v) - } - return v, nil -} - func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry, error) { cniPorts := make([]cniPortMapEntry, 0, len(ports)) for _, port := range ports { @@ -401,3 +366,73 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry } return cniPorts, nil } + +func removeMachinePlugin(conf *libcni.NetworkConfigList) *libcni.NetworkConfigList { + plugins := make([]*libcni.NetworkConfig, 0, len(conf.Plugins)) + for _, net := range conf.Plugins { + if net.Network.Type != "podman-machine" { + plugins = append(plugins, net) + } + } + conf.Plugins = plugins + return conf +} + +type options struct { + vlan int + mtu int + vlanPluginMode string +} + +func parseOptions(networkOptions map[string]string, networkDriver string) (*options, error) { + opt := &options{} + var err error + for k, v := range networkOptions { + switch k { + case "mtu": + opt.mtu, err = internalutil.ParseMTU(v) + if err != nil { + return nil, err + } + + case "vlan": + opt.vlan, err = internalutil.ParseVlan(v) + if err != nil { + return nil, err + } + + case "mode": + switch networkDriver { + case types.MacVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) { + return nil, errors.Errorf("unknown macvlan mode %q", v) + } + case types.IPVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) { + return nil, errors.Errorf("unknown ipvlan mode %q", v) + } + default: + return nil, errors.Errorf("cannot set option \"mode\" with driver %q", 
networkDriver) + } + opt.vlanPluginMode = v + + default: + return nil, errors.Errorf("unsupported network option %s", k) + } + } + return opt, nil +} + +func fileTime(file string) (time.Time, error) { + var st unix.Stat_t + for { + err := unix.Stat(file, &st) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return time.Time{}, &os.PathError{Path: file, Op: "stat", Err: err} + } + } + return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)), nil //nolint:unconvert // On some platforms Sec and Nsec are int32. +} diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go similarity index 95% rename from vendor/github.com/containers/podman/v3/libpod/network/cni/cni_exec.go rename to vendor/github.com/containers/common/libnetwork/cni/cni_exec.go index ae857bcfb7f..6bfa8d63b1a 100644 --- a/vendor/github.com/containers/podman/v3/libpod/network/cni/cni_exec.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go @@ -16,6 +16,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package cni @@ -30,7 +31,7 @@ import ( "github.com/containernetworking/cni/pkg/invoke" "github.com/containernetworking/cni/pkg/version" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/storage/pkg/unshare" ) type cniExec struct { @@ -75,7 +76,7 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [ // by conmon and thus not have XDG_RUNTIME_DIR set to same value as podman run. // Because of it dnsname will not find the config files and cannot correctly cleanup. // To fix this we should also unset XDG_RUNTIME_DIR for the cni plugins as rootful. - if !rootless.IsRootless() { + if !unshare.IsRootless() { c.Env = append(c.Env, "XDG_RUNTIME_DIR=") } @@ -87,7 +88,7 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [ } // annotatePluginError parses the common cni plugin error json. 
-func annotatePluginError(err error, plugin string, stdout []byte, stderr []byte) error { +func annotatePluginError(err error, plugin string, stdout, stderr []byte) error { pluginName := filepath.Base(plugin) emsg := cniPluginError{ plugin: pluginName, diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go similarity index 90% rename from vendor/github.com/containers/podman/v3/libpod/network/cni/cni_types.go rename to vendor/github.com/containers/common/libnetwork/cni/cni_types.go index fbf917c2d3e..25cc173a67d 100644 --- a/vendor/github.com/containers/podman/v3/libpod/network/cni/cni_types.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cni @@ -7,7 +8,7 @@ import ( "os" "path/filepath" - "github.com/containers/podman/v3/libpod/network/types" + "github.com/containers/common/libnetwork/types" ) const ( @@ -110,12 +111,6 @@ type dnsNameConfig struct { Capabilities map[string]bool `json:"capabilities"` } -// podmanMachineConfig enables port handling on the host OS -type podmanMachineConfig struct { - PluginType string `json:"type"` - Capabilities map[string]bool `json:"capabilities"` -} - // ncList describes a generic map type ncList map[string]interface{} @@ -139,7 +134,7 @@ func newNcList(name, version string, labels, options map[string]string) ncList { } // newHostLocalBridge creates a new LocalBridge for host-local -func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu int, vlan int, ipamConf ipamConfig) *hostLocalBridge { +func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipamConf *ipamConfig) *hostLocalBridge { caps := make(map[string]bool) caps["ips"] = true bridge := hostLocalBridge{ @@ -150,11 +145,13 @@ func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu int, vlan int, MTU: mtu, HairpinMode: true, Vlan: vlan, - IPAM: ipamConf, } - // if we use host-local set the ips cap to ensure we can set static ips via runtime config - if ipamConf.PluginType == types.HostLocalIPAMDriver { - bridge.Capabilities = caps + if ipamConf != nil { + bridge.IPAM = *ipamConf + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipamConf.PluginType == types.HostLocalIPAMDriver { + bridge.Capabilities = caps + } } return &bridge } @@ -176,13 +173,13 @@ func newIPAMLocalHostRange(subnet types.IPNet, leaseRange *types.LeaseRange, gw Subnet: subnet.String(), } - // an user provided a range, we add it here + // a user provided a range, we add it here if leaseRange != nil { if leaseRange.StartIP != nil { hostRange.RangeStart = leaseRange.StartIP.String() } if leaseRange.EndIP != nil { - hostRange.RangeStart = leaseRange.EndIP.String() + hostRange.RangeEnd = leaseRange.EndIP.String() } } @@ -261,10 +258,12 @@ func hasDNSNamePlugin(paths []string) bool { } // newVLANPlugin creates a macvlanconfig with a given device name -func newVLANPlugin(pluginType, device, mode string, mtu int, ipam ipamConfig) VLANConfig { +func newVLANPlugin(pluginType, device, mode string, mtu int, ipam *ipamConfig) VLANConfig { m := VLANConfig{ PluginType: pluginType, - IPAM: ipam, + } + if ipam != nil { + m.IPAM = *ipam } if mtu > 0 { m.MTU = mtu @@ -285,12 +284,3 @@ func newVLANPlugin(pluginType, device, mode string, mtu int, ipam ipamConfig) VL } return m } - -func newPodmanMachinePlugin() podmanMachineConfig { - caps := make(map[string]bool, 1) - caps["portMappings"] = 
true - return podmanMachineConfig{ - PluginType: "podman-machine", - Capabilities: caps, - } -} diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go new file mode 100644 index 00000000000..c6967b600bf --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -0,0 +1,241 @@ +//go:build linux +// +build linux + +package cni + +import ( + "net" + "os" + + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// NetworkCreate will take a partially filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.libpodNet.Name] = network + return *network.libpodNet, nil +} + +// networkCreate will fill out the given network struct and return the new network entry. +// If defaultNet is true, it will not validate against used subnets and it will not write the cni config to disk. +func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + err = validateIPAMDriver(newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. + // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host.
+ var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools) + if err != nil { + return nil, err + } + case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: + err = createIPMACVLAN(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks) + if err != nil { + return nil, err + } + + // generate the network ID + newNetwork.ID = getNetworkIDFromName(newNetwork.Name) + + // when we do not have ipam we must disable dns + internalutil.IpamNoneDisableDns(newNetwork) + + // FIXME: Should this be a hard error? + if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { + logrus.Warnf("dnsname and internal networks are incompatible. dnsname plugin not configured for network %s", newNetwork.Name) + newNetwork.DNSEnabled = false + } + + cniConf, path, err := n.createCNIConfigListFromNetwork(newNetwork, !defaultNet) + if err != nil { + return nil, err + } + return &network{cniNet: cniConf, libpodNet: newNetwork, filename: path}, nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. +func (n *cniNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. + if network.libpodNet.Name == n.defaultNetwork { + return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + } + + // Remove the bridge network interface on the host. + if network.libpodNet.Driver == types.BridgeNetworkDriver { + link, err := netlink.LinkByName(network.libpodNet.NetworkInterface) + if err == nil { + err = netlink.LinkDel(link) + // only log the error, it is not fatal + if err != nil { + logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err) + } + } + } + + file := network.filename + delete(n.networks, network.libpodNet.Name) + + // make sure to not error for ErrNotExist + if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + return nil +} + +// NetworkList will return all known Networks. Optionally you can +// supply a list of filter functions. Only if a network matches all +// functions it is returned. +func (n *cniNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + networks := make([]types.Network, 0, len(n.networks)) +outer: + for _, net := range n.networks { + for _, filter := range filters { + // All filters have to match, if one does not match we can skip to the next network. + if !filter(*net.libpodNet) { + continue outer + } + } + networks = append(networks, *net.libpodNet) + } + return networks, nil +} + +// NetworkInspect will return the Network with the given name or ID. 
+func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return types.Network{}, err + } + return *network.libpodNet, nil +} + +func createIPMACVLAN(network *types.Network) error { + if network.NetworkInterface != "" { + interfaceNames, err := internalutil.GetLiveNetworkNames() + if err != nil { + return err + } + if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { + return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + } + } + + switch network.IPAMOptions[types.Driver] { + // set default + case "": + if len(network.Subnets) == 0 { + // if no subnets and no driver, choose dhcp + network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver + } else { + network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver + } + case types.HostLocalIPAMDriver: + if len(network.Subnets) == 0 { + return errors.New("host-local ipam driver set but no subnets are given") + } + } + + if network.IPAMOptions[types.Driver] == types.DHCPIPAMDriver && network.Internal { + return errors.New("internal is not supported with macvlan and dhcp ipam driver") + } + return nil +} + +func validateIPAMDriver(n *types.Network) error { + ipamDriver := n.IPAMOptions[types.Driver] + switch ipamDriver { + case "", types.HostLocalIPAMDriver: + case types.DHCPIPAMDriver, types.NoneIPAMDriver: + if len(n.Subnets) > 0 { + return errors.Errorf("%s ipam driver is set but subnets are given", ipamDriver) + } + default: + return errors.Errorf("unsupported ipam driver %q", ipamDriver) + } + return nil +}
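A rough usage sketch for the config.go entry points above (assumed: a backend n obtained from NewCNINetworkInterface, plus imports of fmt and github.com/containers/common/libnetwork/types; values are illustrative):

// Hand in a partially filled Network; name, ID, interface name and, for
// the bridge driver, a free subnet are filled in as described above.
created, err := n.NetworkCreate(types.Network{Driver: types.BridgeNetworkDriver})
if err != nil {
	return err
}
fmt.Println(created.Name, created.ID)
// Lookup and removal work by name or ID alike.
if _, err := n.NetworkInspect(created.Name); err != nil {
	return err
}
if err := n.NetworkRemove(created.ID); err != nil {
	return err
}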
diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go similarity index 59% rename from vendor/github.com/containers/podman/v3/libpod/network/cni/network.go rename to vendor/github.com/containers/common/libnetwork/cni/network.go index a37a84373c1..82b9cbd2e50 100644 --- a/vendor/github.com/containers/podman/v3/libpod/network/cni/network.go +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cni @@ -6,17 +7,14 @@ import ( "context" "crypto/sha256" "encoding/hex" - "fmt" - "net" "os" + "path/filepath" "strings" "time" "github.com/containernetworking/cni/libcni" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/libpod/network/util" - pkgutil "github.com/containers/podman/v3/pkg/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" "github.com/containers/storage/pkg/lockfile" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -35,6 +33,9 @@ type cniNetwork struct { // defaultSubnet is the default subnet for the default network. defaultSubnet types.IPNet + // defaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create + defaultsubnetPools []config.SubnetPool + // isMachine describes whenever podman runs in a podman machine environment. isMachine bool @@ -66,18 +67,18 @@ type InitConfig struct { // DefaultSubnet is the default subnet for the default network. DefaultSubnet string + // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create + DefaultsubnetPools []config.SubnetPool + // IsMachine describes whenever podman runs in a podman machine environment. IsMachine bool - - // LockFile is the path to lock file. - LockFile string } // NewCNINetworkInterface creates the ContainerNetwork interface for the CNI backend. // Note: The networks are not loaded from disk until a method is called. -func NewCNINetworkInterface(conf InitConfig) (types.ContainerNetwork, error) { +func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { // TODO: consider using a shared memory lock - lock, err := lockfile.GetLockfile(conf.LockFile) + lock, err := lockfile.GetLockfile(filepath.Join(conf.CNIConfigDir, "cni.lock")) if err != nil { return nil, err } @@ -96,15 +97,21 @@ func NewCNINetworkInterface(conf InitConfig) (types.ContainerNetwork, error) { return nil, errors.Wrap(err, "failed to parse default subnet") } + defaultSubnetPools := conf.DefaultsubnetPools + if defaultSubnetPools == nil { + defaultSubnetPools = config.DefaultSubnetPools + } + cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{}) n := &cniNetwork{ - cniConfigDir: conf.CNIConfigDir, - cniPluginDirs: conf.CNIPluginDirs, - cniConf: cni, - defaultNetwork: defaultNetworkName, - defaultSubnet: defaultNet, - isMachine: conf.IsMachine, - lock: lock, + cniConfigDir: conf.CNIConfigDir, + cniPluginDirs: conf.CNIPluginDirs, + cniConf: cni, + defaultNetwork: defaultNetworkName, + defaultSubnet: defaultNet, + defaultsubnetPools: defaultSubnetPools, + isMachine: conf.IsMachine, + lock: lock, } return n, nil @@ -116,6 +123,11 @@ func (n *cniNetwork) Drivers() []string { return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver, types.IPVLANNetworkDriver} } +// DefaultNetworkName will return the default cni network name. +func (n *cniNetwork) DefaultNetworkName() string { + return n.defaultNetwork +} + func (n *cniNetwork) loadNetworks() error { // check the mod time of the config dir f, err := os.Stat(n.cniConfigDir) @@ -149,11 +161,18 @@ func (n *cniNetwork) loadNetworks() error { continue } - if !define.NameRegex.MatchString(conf.Name) { - logrus.Warnf("CNI config list %s has invalid name, skipping: %v", file, define.RegexError) + if !types.NameRegex.MatchString(conf.Name) { + logrus.Warnf("CNI config list %s has invalid name, skipping: %v", file, types.RegexError) continue } + // podman < v4.0 used the podman-machine cni plugin for podman machine port forwarding + // since this is now built into podman we no longer use the plugin + // old configs may still contain it so we just remove it here + if n.isMachine { + conf = removeMachinePlugin(conf) + } + if _, err := n.cniConf.ValidateNetworkList(context.Background(), conf); err != nil { logrus.Warnf("Error validating CNI config file %s: %v", file, err) continue } @@ -201,7 +220,7 @@ func (n *cniNetwork) createDefaultNetwork() (*network, error) { {Subnet: n.defaultSubnet}, }, } - return n.networkCreate(net, true) + return n.networkCreate(&net, true) } // getNetwork will lookup a network by name or ID.
It returns an @@ -232,7 +251,7 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) { if net != nil { return net, nil } - return nil, errors.Wrapf(define.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) + return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) } // getNetworkIDFromName creates a network ID from the name. It is just the @@ -242,111 +261,29 @@ func getNetworkIDFromName(name string) string { return hex.EncodeToString(hash[:]) } -// getFreeIPv6NetworkSubnet returns a unused ipv4 subnet -func (n *cniNetwork) getFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) { - // the default podman network is 10.88.0.0/16 - // start locking for free /24 networks - network := &net.IPNet{ - IP: net.IP{10, 89, 0, 0}, - Mask: net.IPMask{255, 255, 255, 0}, - } - - // TODO: make sure to not use public subnets - for { - if intersectsConfig := util.NetworkIntersectsWithNetworks(network, usedNetworks); !intersectsConfig { - logrus.Debugf("found free ipv4 network subnet %s", network.String()) - return &types.Subnet{ - Subnet: types.IPNet{IPNet: *network}, - }, nil - } - var err error - network, err = util.NextSubnet(network) - if err != nil { - return nil, err - } - } -} - -// getFreeIPv6NetworkSubnet returns a unused ipv6 subnet -func (n *cniNetwork) getFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) { - // FIXME: Is 10000 fine as limit? We should prevent an endless loop. - for i := 0; i < 10000; i++ { - // RFC4193: Choose the ipv6 subnet random and NOT sequentially. - network, err := util.GetRandomIPv6Subnet() - if err != nil { - return nil, err - } - if intersectsConfig := util.NetworkIntersectsWithNetworks(&network, usedNetworks); !intersectsConfig { - logrus.Debugf("found free ipv6 network subnet %s", network.String()) - return &types.Subnet{ - Subnet: types.IPNet{IPNet: network}, - }, nil - } - } - return nil, errors.New("failed to get random ipv6 subnet") -} +// Implement the NetUtil interface for easy code sharing with other network interfaces. -// getUsedSubnets returns a list of all used subnets by network -// configs and interfaces on the host. -func (n *cniNetwork) getUsedSubnets() ([]*net.IPNet, error) { - // first, load all used subnets from network configs - subnets := make([]*net.IPNet, 0, len(n.networks)) +// ForEach call the given function for each network +func (n *cniNetwork) ForEach(run func(types.Network)) { for _, val := range n.networks { - for i := range val.libpodNet.Subnets { - subnets = append(subnets, &val.libpodNet.Subnets[i].Subnet.IPNet) - } + run(*val.libpodNet) } - // second, load networks from the current system - liveSubnets, err := util.GetLiveNetworkSubnets() - if err != nil { - return nil, err - } - return append(subnets, liveSubnets...), nil } -// getFreeDeviceName returns a free device name which can -// be used for new configs as name and bridge interface name -func (n *cniNetwork) getFreeDeviceName() (string, error) { - bridgeNames := n.getBridgeInterfaceNames() - netNames := n.getUsedNetworkNames() - liveInterfaces, err := util.GetLiveNetworkNames() - if err != nil { - return "", nil - } - names := make([]string, 0, len(bridgeNames)+len(netNames)+len(liveInterfaces)) - names = append(names, bridgeNames...) - names = append(names, netNames...) - names = append(names, liveInterfaces...) - // FIXME: Is a limit fine? 
- // Start by 1, 0 is reserved for the default network - for i := 1; i < 1000000; i++ { - deviceName := fmt.Sprintf("%s%d", cniDeviceName, i) - if !pkgutil.StringInSlice(deviceName, names) { - logrus.Debugf("found free device name %s", deviceName) - return deviceName, nil - } - } - return "", errors.New("could not find free device name, to many iterations") +// Len return the number of networks +func (n *cniNetwork) Len() int { + return len(n.networks) } -// getUsedNetworkNames returns all network names already used -// by network configs -func (n *cniNetwork) getUsedNetworkNames() []string { - names := make([]string, 0, len(n.networks)) - for _, val := range n.networks { - names = append(names, val.libpodNet.Name) - } - return names +// DefaultInterfaceName return the default cni bridge name, must be suffixed with a number. +func (n *cniNetwork) DefaultInterfaceName() string { + return cniDeviceName } -// getUsedNetworkNames returns all bridge device names already used -// by network configs -func (n *cniNetwork) getBridgeInterfaceNames() []string { - names := make([]string, 0, len(n.networks)) - for _, val := range n.networks { - if val.libpodNet.Driver == types.BridgeNetworkDriver { - names = append(names, val.libpodNet.NetworkInterface) - } +func (n *cniNetwork) Network(nameOrID string) (*types.Network, error) { + network, err := n.getNetwork(nameOrID) + if err != nil { + return nil, err } - return names + return network.libpodNet, err } diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go similarity index 74% rename from vendor/github.com/containers/podman/v3/libpod/network/cni/run.go rename to vendor/github.com/containers/common/libnetwork/cni/run.go index bd873f89b6f..c7fa86ed0aa 100644 --- a/vendor/github.com/containers/podman/v3/libpod/network/cni/run.go +++ b/vendor/github.com/containers/common/libnetwork/cni/run.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cni @@ -12,8 +13,8 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types" types040 "github.com/containernetworking/cni/pkg/types/040" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/network/types" + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -30,24 +31,9 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma return nil, err } - if namespacePath == "" { - return nil, errors.New("namespacePath is empty") - } - if options.ContainerID == "" { - return nil, errors.New("ContainerID is empty") - } - if len(options.Networks) == 0 { - return nil, errors.New("must specify at least one network") - } - for name, netOpts := range options.Networks { - network := n.networks[name] - if network == nil { - return nil, errors.Wrapf(define.ErrNoSuchNetwork, "network %s", name) - } - err := validatePerNetworkOpts(network, netOpts) - if err != nil { - return nil, err - } + err = util.ValidateSetupOptions(n, namespacePath, options) + if err != nil { + return nil, err } // set the loopback adapter up in the container netns @@ -84,8 +70,9 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma results := make(map[string]types.StatusBlock, len(options.Networks)) for name, netOpts := range options.Networks { + netOpts := netOpts network := 
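getNetworkIDFromName above derives a stable ID rather than storing one: per the surrounding comment and the retained crypto/sha256 and encoding/hex imports, the ID is simply the hex-encoded sha256 hash of the network name. A standalone sketch using only the standard library:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// networkID mirrors getNetworkIDFromName: hash the name, hex-encode the digest.
func networkID(name string) string {
    hash := sha256.Sum256([]byte(name))
    return hex.EncodeToString(hash[:])
}

func main() {
    // the default network name always hashes to the same ID on every host
    fmt.Println(networkID("podman"))
}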
diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go
similarity index 74%
rename from vendor/github.com/containers/podman/v3/libpod/network/cni/run.go
rename to vendor/github.com/containers/common/libnetwork/cni/run.go
index bd873f89b6f..c7fa86ed0aa 100644
--- a/vendor/github.com/containers/podman/v3/libpod/network/cni/run.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/run.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package cni
@@ -12,8 +13,8 @@ import (
     cnitypes "github.com/containernetworking/cni/pkg/types"
     types040 "github.com/containernetworking/cni/pkg/types/040"
     "github.com/containernetworking/plugins/pkg/ns"
-    "github.com/containers/podman/v3/libpod/define"
-    "github.com/containers/podman/v3/libpod/network/types"
+    "github.com/containers/common/libnetwork/internal/util"
+    "github.com/containers/common/libnetwork/types"
     "github.com/hashicorp/go-multierror"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
@@ -30,24 +31,9 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
         return nil, err
     }
 
-    if namespacePath == "" {
-        return nil, errors.New("namespacePath is empty")
-    }
-    if options.ContainerID == "" {
-        return nil, errors.New("ContainerID is empty")
-    }
-    if len(options.Networks) == 0 {
-        return nil, errors.New("must specify at least one network")
-    }
-    for name, netOpts := range options.Networks {
-        network := n.networks[name]
-        if network == nil {
-            return nil, errors.Wrapf(define.ErrNoSuchNetwork, "network %s", name)
-        }
-        err := validatePerNetworkOpts(network, netOpts)
-        if err != nil {
-            return nil, err
-        }
+    err = util.ValidateSetupOptions(n, namespacePath, options)
+    if err != nil {
+        return nil, err
     }
 
     // set the loopback adapter up in the container netns
@@ -84,8 +70,9 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
 
     results := make(map[string]types.StatusBlock, len(options.Networks))
     for name, netOpts := range options.Networks {
+        netOpts := netOpts
         network := n.networks[name]
-        rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, netOpts)
+        rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts)
 
         // If we have more than one static ip we need parse the ips via runtime config,
         // make sure to add the ips capability to the first plugin otherwise it doesn't get the ips
@@ -138,68 +125,52 @@ func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
     result.DNSSearchDomains = cniResult.DNS.Search
 
     interfaces := make(map[string]types.NetInterface)
-    for _, ip := range cniResult.IPs {
-        if ip.Interface == nil {
-            // we do no expect ips without an interface
+    for intIndex, netInterface := range cniResult.Interfaces {
+        // we are only interested in interfaces in the container namespace
+        if netInterface.Sandbox == "" {
             continue
         }
-        if len(cniResult.Interfaces) <= *ip.Interface {
-            return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
+
+        mac, err := net.ParseMAC(netInterface.Mac)
+        if err != nil {
+            return result, err
         }
-        cniInt := cniResult.Interfaces[*ip.Interface]
-        netInt, ok := interfaces[cniInt.Name]
-        if ok {
-            netInt.Networks = append(netInt.Networks, types.NetAddress{
-                Subnet:  types.IPNet{IPNet: ip.Address},
-                Gateway: ip.Gateway,
-            })
-            interfaces[cniInt.Name] = netInt
-        } else {
-            mac, err := net.ParseMAC(cniInt.Mac)
-            if err != nil {
-                return result, err
+        subnets := make([]types.NetAddress, 0, len(cniResult.IPs))
+        for _, ip := range cniResult.IPs {
+            if ip.Interface == nil {
+                // we do not expect ips without an interface
+                continue
+            }
+            if len(cniResult.Interfaces) <= *ip.Interface {
+                return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface)
             }
-            interfaces[cniInt.Name] = types.NetInterface{
-                MacAddress: mac,
-                Networks: []types.NetAddress{{
-                    Subnet:  types.IPNet{IPNet: ip.Address},
+
+            // when we have an ip for this interface add it to the subnets
+            if *ip.Interface == intIndex {
+                subnets = append(subnets, types.NetAddress{
+                    IPNet:   types.IPNet{IPNet: ip.Address},
                     Gateway: ip.Gateway,
-                }},
+                })
             }
         }
+        interfaces[netInterface.Name] = types.NetInterface{
+            MacAddress: types.HardwareAddr(mac),
+            Subnets:    subnets,
+        }
     }
     result.Interfaces = interfaces
     return result, nil
 }
 
-// validatePerNetworkOpts checks that all given static ips are in a subnet on this network
-func validatePerNetworkOpts(network *network, netOpts types.PerNetworkOptions) error {
-    if netOpts.InterfaceName == "" {
-        return errors.Errorf("interface name on network %s is empty", network.libpodNet.Name)
-    }
-outer:
-    for _, ip := range netOpts.StaticIPs {
-        for _, s := range network.libpodNet.Subnets {
-            if s.Subnet.Contains(ip) {
-                continue outer
-            }
-        }
-        return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.libpodNet.Name)
-    }
-    return nil
-}
-
-func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPortMapEntry, opts types.PerNetworkOptions) *libcni.RuntimeConf {
+func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPortMapEntry, opts *types.PerNetworkOptions) *libcni.RuntimeConf {
     rt := &libcni.RuntimeConf{
         ContainerID: conID,
         NetNS:       netns,
        IfName:      opts.InterfaceName,
         Args: [][2]string{
             {"IgnoreUnknown", "1"},
-            // FIXME: Should we set the K8S args?
-            //{"K8S_POD_NAMESPACE", conName},
-            //{"K8S_POD_INFRA_CONTAINER_ID", conID},
-            // K8S_POD_NAME is used by dnsname to get the container name
+            // Do not set the K8S env vars, see https://github.com/containers/podman/issues/12083.
+            // Only K8S_POD_NAME is used by dnsname to get the container name.
             {"K8S_POD_NAME", conName},
         },
         CapabilityArgs: map[string]interface{}{},
@@ -264,7 +235,8 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption
 
     var multiErr *multierror.Error
     for name, netOpts := range options.Networks {
-        rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, netOpts)
+        netOpts := netOpts
+        rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts)
 
         cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt)
         if err == nil {
@@ -273,7 +245,7 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption
             logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name)
             network := n.networks[name]
             if network == nil {
-                multiErr = multierror.Append(multiErr, errors.Wrapf(define.ErrNoSuchNetwork, "network %s", name))
+                multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name))
                 continue
             }
             cniConfList = network.cniNet
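Both Setup and teardown now copy the loop variable (`netOpts := netOpts`) before passing its address to getRuntimeConfig. A minimal sketch of why that shadow copy matters on Go versions before 1.22, where the range variable is a single reused variable (from Go 1.22 on, loop variables are per-iteration and both slices below behave the same):

package main

import "fmt"

func main() {
    vals := []int{1, 2}
    aliased := []*int{}
    copied := []*int{}
    for _, v := range vals {
        aliased = append(aliased, &v) // pre-Go 1.22: &v points at one shared variable
        v := v                        // shadow copy, the same trick as netOpts := netOpts
        copied = append(copied, &v)
    }
    fmt.Println(*aliased[0], *aliased[1]) // pre-1.22: 2 2
    fmt.Println(*copied[0], *copied[1])   // always: 1 2
}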
diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go
new file mode 100644
index 00000000000..ce248a181ba
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go
@@ -0,0 +1,339 @@
+package etchosts
+
+import (
+    "bufio"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "strings"
+
+    "github.com/containers/common/pkg/config"
+    "github.com/containers/common/pkg/util"
+)
+
+const (
+    hostContainersInternal = "host.containers.internal"
+    localhost              = "localhost"
+)
+
+type HostEntries []HostEntry
+
+type HostEntry struct {
+    IP    string
+    Names []string
+}
+
+// Params for the New() function call
+type Params struct {
+    // BaseFile is the file where we read entries from and add entries to
+    // the target hosts file. If the name is empty it will not read any entries.
+    BaseFile string
+    // ExtraHosts is a slice of entries in the "hostname:ip" format.
+    // Optional.
+    ExtraHosts []string
+    // ContainerIPs should contain the main container ipv4 and ipv6 if available
+    // with the container name and host name as names set.
+    // Optional.
+    ContainerIPs HostEntries
+    // HostContainersInternalIP is the IP for the host.containers.internal entry.
+    // Optional.
+    HostContainersInternalIP string
+    // TargetFile where the hosts are written to.
+    TargetFile string
+}
+
+// New will create a new hosts file and write this to the target file.
+// This function does not prevent any kind of concurrency problems; it is
+// the caller's responsibility to avoid concurrent writes to this file.
+// The extraHosts are written first, then the hosts from the file baseFile and the
+// containerIps. The container ip entry is only added when the name was not already
+// added before.
+func New(params *Params) error {
+    if err := new(params); err != nil {
+        return fmt.Errorf("failed to create new hosts file: %w", err)
+    }
+    return nil
+}
+
+// Add adds the given entries to the hosts file; entries are only added if
+// they are not already present.
+// Add is not atomic because it will keep the current file inode. This is
+// required to keep bind mounts for containers working.
+func Add(file string, entries HostEntries) error {
+    if err := add(file, entries); err != nil {
+        return fmt.Errorf("failed to add entries to hosts file: %w", err)
+    }
+    return nil
+}
+
+// AddIfExists will add the given entries only if one of the existsEntries
+// is in the hosts file. This API is required for podman network connect.
+// Since we want to add the same host name for each network ip we need to
+// add duplicates, and the normal Add() call prevents us from doing so.
+// However, since we also do not want to overwrite potential entries that
+// were added by users manually, we first have to check that the expected
+// current entries are in the file. Note that this will only check
+// for one match, not all. It will also only check that the ip and one of
+// the hostnames match, like Remove().
+func AddIfExists(file string, existsEntries, newEntries HostEntries) error {
+    if err := addIfExists(file, existsEntries, newEntries); err != nil {
+        return fmt.Errorf("failed to add entries to hosts file: %w", err)
+    }
+    return nil
+}
+
+// Remove will remove the given entries from the file. An entry will be
+// removed when the ip and at least one name matches. Not all names have
+// to match. If the given entries are not present in the file no error is
+// returned.
+// Remove is not atomic because it will keep the current file inode. This is
+// required to keep bind mounts for containers working.
+func Remove(file string, entries HostEntries) error {
+    if err := remove(file, entries); err != nil {
+        return fmt.Errorf("failed to remove entries from hosts file: %w", err)
+    }
+    return nil
+}
+
+// new see comment on New()
+func new(params *Params) error {
+    entries, err := parseExtraHosts(params.ExtraHosts)
+    if err != nil {
+        return err
+    }
+    entries2, err := parseHostsFile(params.BaseFile)
+    if err != nil {
+        return err
+    }
+    entries = append(entries, entries2...)
+
+    // preallocate the slice with enough space for the 3 special entries below
+    containerIPs := make(HostEntries, 0, len(params.ContainerIPs)+3)
+
+    // if localhost was not added we add it
+    // https://github.com/containers/podman/issues/11411
+    lh := []string{localhost}
+    l1 := HostEntry{IP: "127.0.0.1", Names: lh}
+    l2 := HostEntry{IP: "::1", Names: lh}
+    containerIPs = append(containerIPs, l1, l2)
+    if params.HostContainersInternalIP != "" {
+        e := HostEntry{IP: params.HostContainersInternalIP, Names: []string{hostContainersInternal}}
+        containerIPs = append(containerIPs, e)
+    }
+    containerIPs = append(containerIPs, params.ContainerIPs...)
+
+    if err := writeHostFile(params.TargetFile, entries, containerIPs); err != nil {
+        return err
+    }
+    return nil
+}
+
+// add see comment on Add()
+func add(file string, entries HostEntries) error {
+    currentEntries, err := parseHostsFile(file)
+    if err != nil {
+        return err
+    }
+
+    names := make(map[string]struct{})
+    for _, entry := range currentEntries {
+        for _, name := range entry.Names {
+            names[name] = struct{}{}
+        }
+    }
+
+    // open file in append mode since we only add, we do not have to write existing entries again
+    f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0o644)
+    if err != nil {
+        return err
+    }
+    defer f.Close()
+
+    return addEntriesIfNotExists(f, entries, names)
+}
+
+// addIfExists see comment on AddIfExists()
+func addIfExists(file string, existsEntries, newEntries HostEntries) error {
+    // special case: when there are no existing entries do a normal add;
+    // this can happen when we connect a network which was not connected
+    // to any other networks before
+    if len(existsEntries) == 0 {
+        return add(file, newEntries)
+    }
+
+    currentEntries, err := parseHostsFile(file)
+    if err != nil {
+        return err
+    }
+
+    for _, entry := range currentEntries {
+        if !checkIfEntryExists(entry, existsEntries) {
+            // keep looking for existing entries
+            continue
+        }
+        // if we have a matching existing entry add the new entries
+        // open file in append mode since we only add, we do not have to write existing entries again
+        f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0o644)
+        if err != nil {
+            return err
+        }
+        defer f.Close()
+
+        for _, e := range newEntries {
+            if _, err = f.WriteString(formatLine(e.IP, e.Names)); err != nil {
+                return err
+            }
+        }
+        return nil
+    }
+    // no match found is no error
+    return nil
+}
+
+// remove see comment on Remove()
+func remove(file string, entries HostEntries) error {
+    currentEntries, err := parseHostsFile(file)
+    if err != nil {
+        return err
+    }
+
+    f, err := os.Create(file)
+    if err != nil {
+        return err
+    }
+    defer f.Close()
+
+    for _, entry := range currentEntries {
+        if checkIfEntryExists(entry, entries) {
+            continue
+        }
+        if _, err = f.WriteString(formatLine(entry.IP, entry.Names)); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func checkIfEntryExists(current HostEntry, entries HostEntries) bool {
+    // check if the current entry equals one of the given entries
+    for _, rm := range entries {
+        if current.IP == rm.IP {
+            // it is enough if one of the names match, in this case we remove the full entry
+            for _, name := range current.Names {
+                if util.StringInSlice(name, rm.Names) {
+                    return true
+                }
+            }
+        }
+    }
+    return false
+}
+
+// parseExtraHosts converts a slice of "name:ip" strings to entries.
+// Because podman and buildah both store the extra hosts in this format,
+// we convert it here instead of having to do this on the caller side.
+func parseExtraHosts(extraHosts []string) (HostEntries, error) {
+    entries := make(HostEntries, 0, len(extraHosts))
+    for _, entry := range extraHosts {
+        values := strings.SplitN(entry, ":", 2)
+        if len(values) != 2 {
+            return nil, fmt.Errorf("unable to parse host entry %q: incorrect format", entry)
+        }
+        if values[0] == "" {
+            return nil, fmt.Errorf("hostname in host entry %q is empty", entry)
+        }
+        if values[1] == "" {
+            return nil, fmt.Errorf("IP address in host entry %q is empty", entry)
+        }
+        e := HostEntry{IP: values[1], Names: []string{values[0]}}
+        entries = append(entries, e)
+    }
+    return entries, nil
+}
+
+// parseHostsFile parses a given hosts file and returns all entries in it.
+// Note that this will remove all comments and spaces.
+func parseHostsFile(file string) (HostEntries, error) {
+    // empty file is valid, in this case we skip adding entries from the file
+    if file == "" {
+        return nil, nil
+    }
+
+    f, err := os.Open(file)
+    if err != nil {
+        // do not error when the default hosts file does not exist
+        // https://github.com/containers/podman/issues/12667
+        if errors.Is(err, os.ErrNotExist) && file == config.DefaultHostsFile {
+            return nil, nil
+        }
+        return nil, err
+    }
+    defer f.Close()
+
+    entries := HostEntries{}
+    scanner := bufio.NewScanner(f)
+    for scanner.Scan() {
+        // split off the comments
+        line := scanner.Text()
+        if c := strings.IndexByte(line, '#'); c != -1 {
+            line = line[:c]
+        }
+        fields := strings.Fields(line)
+        // if we only have an ip without names we skip it
+        if len(fields) < 2 {
+            continue
+        }
+
+        e := HostEntry{IP: fields[0], Names: fields[1:]}
+        entries = append(entries, e)
+    }
+
+    return entries, scanner.Err()
+}
+
+// writeHostFile writes the entries to the given file
+func writeHostFile(file string, userEntries, containerIPs HostEntries) error {
+    f, err := os.Create(file)
+    if err != nil {
+        return err
+    }
+    defer f.Close()
+
+    names := make(map[string]struct{})
+    for _, entry := range userEntries {
+        for _, name := range entry.Names {
+            names[name] = struct{}{}
+        }
+        if _, err = f.WriteString(formatLine(entry.IP, entry.Names)); err != nil {
+            return err
+        }
+    }
+
+    return addEntriesIfNotExists(f, containerIPs, names)
+}
+
+// addEntriesIfNotExists only adds the entries for names that are not already
+// in the hosts file, otherwise we would start overwriting user entries
+func addEntriesIfNotExists(f io.StringWriter, containerIPs HostEntries, names map[string]struct{}) error {
+    for _, entry := range containerIPs {
+        freeNames := make([]string, 0, len(entry.Names))
+        for _, name := range entry.Names {
+            if _, ok := names[name]; !ok {
+                freeNames = append(freeNames, name)
+            }
+        }
+        if len(freeNames) > 0 {
+            if _, err := f.WriteString(formatLine(entry.IP, freeNames)); err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
+
+// formatLine converts the given ip and names to a valid hosts line.
+// The returned string includes the newline.
+func formatLine(ip string, names []string) string {
+    return ip + "\t" + strings.Join(names, " ") + "\n"
+}
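For orientation, the whole file reduces to a single New() call at the highest level. A usage sketch with hypothetical values (the target path, extra host, and container entry are made up for the example; only the Params fields come from the code above):

package main

import (
    "fmt"

    "github.com/containers/common/libnetwork/etchosts"
)

func main() {
    // BaseFile entries are written after ExtraHosts; container ips come
    // last and only for names not already present in the file
    err := etchosts.New(&etchosts.Params{
        BaseFile:   "/etc/hosts",
        ExtraHosts: []string{"gateway:10.89.0.1"},
        ContainerIPs: etchosts.HostEntries{
            {IP: "10.89.0.5", Names: []string{"ctr1", "deadbeef"}},
        },
        HostContainersInternalIP: "192.168.1.10",
        TargetFile:               "/tmp/ctr-hosts", // assumption: any writable path
    })
    if err != nil {
        fmt.Println(err)
    }
}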
diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/ip.go b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
new file mode 100644
index 00000000000..2b8186e72c7
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/etchosts/ip.go
@@ -0,0 +1,92 @@
+package etchosts
+
+import (
+    "net"
+
+    "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/libnetwork/util"
+    "github.com/containers/common/pkg/config"
+    "github.com/containers/common/pkg/machine"
+    "github.com/containers/storage/pkg/unshare"
+)
+
+// GetHostContainersInternalIP returns the host.containers.internal ip.
+// If netStatus is not nil then networkInterface must also be non-nil, otherwise this function panics.
+func GetHostContainersInternalIP(conf *config.Config, netStatus map[string]types.StatusBlock, networkInterface types.ContainerNetwork) string {
+    switch conf.Containers.HostContainersInternalIP {
+    case "":
+        // if empty (default) we will automatically choose one below
+        // if the machine is using gvproxy we let the gvproxy dns server handle the dns name, so do not add it
+        if machine.IsGvProxyBased() {
+            return ""
+        }
+    case "none":
+        return ""
+    default:
+        return conf.Containers.HostContainersInternalIP
+    }
+    ip := ""
+    // Only use the bridge ip when root; as rootless the interfaces are created
+    // inside the special netns and not on the host, so we cannot use them.
+    if unshare.IsRootless() {
+        return getLocalIP()
+    }
+    for net, status := range netStatus {
+        network, err := networkInterface.NetworkInspect(net)
+        // only add the host entry for bridge networks;
+        // an ip/macvlan gateway is normally not on the host
+        if err != nil || network.Driver != types.BridgeNetworkDriver {
+            continue
+        }
+        for _, netInt := range status.Interfaces {
+            for _, netAddress := range netInt.Subnets {
+                if netAddress.Gateway != nil {
+                    if util.IsIPv4(netAddress.Gateway) {
+                        return netAddress.Gateway.String()
+                    }
+                    // ipv6 address, but keep looking since we prefer to use ipv4
+                    ip = netAddress.Gateway.String()
+                }
+            }
+        }
+    }
+    if ip != "" {
+        return ip
+    }
+    return getLocalIP()
+}
+
+// getLocalIP returns the non loopback local IP of the host
+func getLocalIP() string {
+    addrs, err := net.InterfaceAddrs()
+    if err != nil {
+        return ""
+    }
+    ip := ""
+    for _, address := range addrs {
+        // check the address type; if it is not a loopback then use it
+        if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() {
+            if util.IsIPv4(ipnet.IP) {
+                return ipnet.IP.String()
+            }
+            // if ipv6 we keep looking for an ipv4 address
+            ip = ipnet.IP.String()
+        }
+    }
+    return ip
+}
+
+// GetNetworkHostEntries returns HostEntries for all ips in the network status
+// with the given hostnames.
+func GetNetworkHostEntries(netStatus map[string]types.StatusBlock, names ...string) HostEntries {
+    hostEntries := make(HostEntries, 0, len(netStatus))
+    for _, status := range netStatus {
+        for _, netInt := range status.Interfaces {
+            for _, netAddress := range netInt.Subnets {
+                e := HostEntry{IP: netAddress.IPNet.IP.String(), Names: names}
+                hostEntries = append(hostEntries, e)
+            }
+        }
+    }
+    return hostEntries
+}
diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/util.go b/vendor/github.com/containers/common/libnetwork/etchosts/util.go
new file mode 100644
index 00000000000..d78284594bb
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/etchosts/util.go
@@ -0,0 +1,30 @@
+package etchosts
+
+import (
+    "fmt"
+
+    "github.com/containers/common/pkg/config"
+    securejoin "github.com/cyphar/filepath-securejoin"
+)
+
+// GetBaseHostFile returns the hosts file which should be used as base.
+// The first param should be the config value config.Containers.BaseHostsFile.
+// The second param should be the root path to the mounted image. This is
+// required when the user conf value is set to "image".
+func GetBaseHostFile(confValue, imageRoot string) (string, error) {
+    switch confValue {
+    case "":
+        return config.DefaultHostsFile, nil
+    case "none":
+        return "", nil
+    case "image":
+        // use secure join to prevent problems with symlinks
+        path, err := securejoin.SecureJoin(imageRoot, config.DefaultHostsFile)
+        if err != nil {
+            return "", fmt.Errorf("failed to get /etc/hosts path in image: %w", err)
+        }
+        return path, nil
+    default:
+        return confValue, nil
+    }
+}
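GetBaseHostFile's four cases are easiest to see side by side. A small usage sketch (the image root path is hypothetical; behavior follows directly from the switch above):

package main

import (
    "fmt"

    "github.com/containers/common/libnetwork/etchosts"
)

func main() {
    // "" -> config.DefaultHostsFile, "none" -> no base file at all,
    // "image" -> /etc/hosts resolved inside the image root via securejoin,
    // anything else -> used verbatim as a path
    for _, conf := range []string{"", "none", "image", "/srv/my-hosts"} {
        path, err := etchosts.GetBaseHostFile(conf, "/var/lib/containers/rootfs")
        fmt.Printf("%-12q -> %q err=%v\n", conf, path, err)
    }
}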
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
new file mode 100644
index 00000000000..bfa72808dda
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
@@ -0,0 +1,71 @@
+package util
+
+import (
+    "net"
+
+    "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/libnetwork/util"
+    "github.com/containers/common/pkg/config"
+    pkgutil "github.com/containers/common/pkg/util"
+    "github.com/pkg/errors"
+)
+
+func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error {
+    if network.NetworkInterface != "" {
+        bridges := GetBridgeInterfaceNames(n)
+        if pkgutil.StringInSlice(network.NetworkInterface, bridges) {
+            return errors.Errorf("bridge name %s already in use", network.NetworkInterface)
+        }
+        if !types.NameRegex.MatchString(network.NetworkInterface) {
+            return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface)
+        }
+    } else {
+        var err error
+        network.NetworkInterface, err = GetFreeDeviceName(n)
+        if err != nil {
+            return err
+        }
+    }
+
+    ipamDriver := network.IPAMOptions[types.Driver]
+    // also do this when the driver is unset
+    if ipamDriver == "" || ipamDriver == types.HostLocalIPAMDriver {
+        if len(network.Subnets) == 0 {
+            freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools)
+            if err != nil {
+                return err
+            }
+            network.Subnets = append(network.Subnets, *freeSubnet)
+        }
+        // ipv6 enabled means dual stack: check if we already have
+        // an ipv4 or ipv6 subnet and add one if not.
+        if network.IPv6Enabled {
+            ipv4 := false
+            ipv6 := false
+            for _, subnet := range network.Subnets {
+                if util.IsIPv6(subnet.Subnet.IP) {
+                    ipv6 = true
+                }
+                if util.IsIPv4(subnet.Subnet.IP) {
+                    ipv4 = true
+                }
+            }
+            if !ipv4 {
+                freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools)
+                if err != nil {
+                    return err
+                }
+                network.Subnets = append(network.Subnets, *freeSubnet)
+            }
+            if !ipv6 {
+                freeSubnet, err := GetFreeIPv6NetworkSubnet(usedNetworks)
+                if err != nil {
+                    return err
+                }
+                network.Subnets = append(network.Subnets, *freeSubnet)
+            }
+        }
+        network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
+    }
+    return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/create.go b/vendor/github.com/containers/common/libnetwork/internal/util/create.go
new file mode 100644
index 00000000000..c1a4bee757b
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/create.go
@@ -0,0 +1,49 @@
+package util
+
+import (
+    "github.com/containers/common/libnetwork/types"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+)
+
+func CommonNetworkCreate(n NetUtil, network *types.Network) error {
+    if network.Labels == nil {
+        network.Labels = map[string]string{}
+    }
+    if network.Options == nil {
+        network.Options = map[string]string{}
+    }
+    if network.IPAMOptions == nil {
+        network.IPAMOptions = map[string]string{}
+    }
+
+    var name string
+    var err error
+    // validate the name when given
+    if network.Name != "" {
+        if !types.NameRegex.MatchString(network.Name) {
+            return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name)
+        }
+        if _, err := n.Network(network.Name); err == nil {
+            return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name)
+        }
+    } else {
+        name, err = GetFreeDeviceName(n)
+        if err != nil {
+            return err
+        }
+        network.Name = name
+        // also use the name as interface name when we create a bridge network
+        if network.Driver == types.BridgeNetworkDriver && network.NetworkInterface == "" {
+            network.NetworkInterface = name
+        }
+    }
+    return nil
+}
+
+func IpamNoneDisableDns(network *types.Network) {
+    if network.IPAMOptions[types.Driver] == types.NoneIPAMDriver {
+        logrus.Debugf("dns disabled for network %q because ipam driver is set to none", network.Name)
+        network.DNSEnabled = false
+    }
+}
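CommonNetworkCreate and CreateBridge both lean on types.NameRegex to gate names. A quick sketch of the kind of names such a pattern accepts; the regex below is an assumption (alphanumeric start, then alphanumerics plus `_ . -`), not a verbatim copy of the types package:

package main

import (
    "fmt"
    "regexp"
)

// assumption: a pattern in the spirit of types.NameRegex; consult the
// containers/common types package for the authoritative definition
var nameRegex = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)

func main() {
    for _, name := range []string{"mynet", "my-net.1", "-bad", "bad name"} {
        fmt.Printf("%-10s valid=%v\n", name, nameRegex.MatchString(name))
    }
}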
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go
new file mode 100644
index 00000000000..650fcb193c9
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go
@@ -0,0 +1,19 @@
+package util
+
+import "github.com/containers/common/libnetwork/types"
+
+// This is a helper package to allow code sharing between the different
+// network interfaces.
+
+// NetUtil is a helper interface which all network interfaces should implement to allow easy code sharing
+type NetUtil interface {
+    // ForEach executes the given function for each network
+    ForEach(func(types.Network))
+    // Len returns the number of networks
+    Len() int
+    // DefaultInterfaceName returns the default interface name, which will be suffixed by a number
+    DefaultInterfaceName() string
+    // Network returns the network with the given name or ID.
+    // It returns an error if the network is not found
+    Network(nameOrID string) (*types.Network, error)
+}
diff --git a/vendor/github.com/containers/podman/v3/libpod/network/util/interfaces.go b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go
similarity index 86%
rename from vendor/github.com/containers/podman/v3/libpod/network/util/interfaces.go
rename to vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go
index dc2bd601db7..20819f75663 100644
--- a/vendor/github.com/containers/podman/v3/libpod/network/util/interfaces.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go
@@ -2,9 +2,9 @@ package util
 
 import "net"
 
-// GetLiveNetworkSubnets returns a slice of subnets representing what the system
+// getLiveNetworkSubnets returns a slice of subnets representing what the system
 // has defined as network interfaces
-func GetLiveNetworkSubnets() ([]*net.IPNet, error) {
+func getLiveNetworkSubnets() ([]*net.IPNet, error) {
     addrs, err := net.InterfaceAddrs()
     if err != nil {
         return nil, err
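The NetUtil interface exists purely so the cni and netavark backends can share the helpers in this package. A toy in-memory implementation, with a local Network struct standing in for types.Network and a placeholder interface prefix (both assumptions made for self-containment):

package main

import "fmt"

type Network struct {
    Name             string
    Driver           string
    NetworkInterface string
}

// memNetworks satisfies the same four-method shape as NetUtil above.
type memNetworks struct{ nets map[string]*Network }

func (m *memNetworks) ForEach(run func(Network)) {
    for _, n := range m.nets {
        run(*n)
    }
}
func (m *memNetworks) Len() int                     { return len(m.nets) }
func (m *memNetworks) DefaultInterfaceName() string { return "cni-podman" } // placeholder prefix
func (m *memNetworks) Network(nameOrID string) (*Network, error) {
    if n, ok := m.nets[nameOrID]; ok {
        return n, nil
    }
    return nil, fmt.Errorf("network %s not found", nameOrID)
}

func main() {
    m := &memNetworks{nets: map[string]*Network{
        "podman": {Name: "podman", Driver: "bridge", NetworkInterface: "cni-podman0"},
    }}
    m.ForEach(func(n Network) { fmt.Println(n.Name, n.NetworkInterface) })
}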
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go
new file mode 100644
index 00000000000..6dc5d932538
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go
@@ -0,0 +1,71 @@
+package util
+
+import (
+    "crypto/rand"
+    "net"
+
+    "github.com/pkg/errors"
+)
+
+func incByte(subnet *net.IPNet, idx int, shift uint) error {
+    if idx < 0 {
+        return errors.New("no more subnets left")
+    }
+
+    var val byte = 1 << shift
+    // if we overflow we have to inc the previous byte
+    if uint(subnet.IP[idx])+uint(val) > 255 {
+        if err := incByte(subnet, idx-1, 0); err != nil {
+            return err
+        }
+    }
+    subnet.IP[idx] += val
+    return nil
+}
+
+// NextSubnet returns the subnet incremented by 1
+func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) {
+    newSubnet := &net.IPNet{
+        IP:   subnet.IP,
+        Mask: subnet.Mask,
+    }
+    ones, bits := newSubnet.Mask.Size()
+    if ones == 0 {
+        return nil, errors.Errorf("%s has only one subnet", subnet.String())
+    }
+    zeroes := uint(bits - ones)
+    shift := zeroes % 8
+    idx := (ones - 1) / 8
+    if err := incByte(newSubnet, idx, shift); err != nil {
+        return nil, err
+    }
+    return newSubnet, nil
+}
+
+func NetworkIntersectsWithNetworks(n *net.IPNet, networklist []*net.IPNet) bool {
+    for _, nw := range networklist {
+        if networkIntersect(n, nw) {
+            return true
+        }
+    }
+    return false
+}
+
+func networkIntersect(n1, n2 *net.IPNet) bool {
+    return n2.Contains(n1.IP) || n1.Contains(n2.IP)
+}
+
+// getRandomIPv6Subnet returns a random internal ipv6 subnet (a unique local address prefix as described in RFC 4193).
+func getRandomIPv6Subnet() (net.IPNet, error) {
+    ip := make(net.IP, 8, net.IPv6len)
+    // read 8 random bytes
+    _, err := rand.Read(ip)
+    if err != nil {
+        return net.IPNet{}, err
+    }
+    // first byte must be FD as per RFC 4193
+    ip[0] = 0xfd
+    // add 8 zero bytes
+    ip = append(ip, make([]byte, 8)...)
+    return net.IPNet{IP: ip, Mask: net.CIDRMask(64, 128)}, nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
new file mode 100644
index 00000000000..1f68df0bb0d
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go
@@ -0,0 +1,37 @@
+package util
+
+import (
+    "strconv"
+
+    "github.com/pkg/errors"
+)
+
+// ParseMTU parses the mtu option
+func ParseMTU(mtu string) (int, error) {
+    if mtu == "" {
+        return 0, nil // default
+    }
+    m, err := strconv.Atoi(mtu)
+    if err != nil {
+        return 0, err
+    }
+    if m < 0 {
+        return 0, errors.Errorf("mtu %d is less than zero", m)
+    }
+    return m, nil
+}
+
+// ParseVlan parses the vlan option
+func ParseVlan(vlan string) (int, error) {
+    if vlan == "" {
+        return 0, nil // default
+    }
+    v, err := strconv.Atoi(vlan)
+    if err != nil {
+        return 0, err
+    }
+    if v < 0 || v > 4094 {
+        return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v)
+    }
+    return v, nil
+}
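NextSubnet's byte-carry arithmetic above is compact; a standalone, stdlib-only sketch of the same increment makes the carry behavior visible (error handling for running off the end of the address space is elided):

package main

import (
    "fmt"
    "net"
)

// nextSubnet bumps the subnet by one network, carrying into higher bytes
// on overflow, mirroring incByte/NextSubnet above.
func nextSubnet(s *net.IPNet) *net.IPNet {
    next := &net.IPNet{IP: append(net.IP(nil), s.IP...), Mask: s.Mask}
    ones, bits := s.Mask.Size()
    zeroes := uint(bits - ones)
    idx := (ones - 1) / 8
    val := byte(1) << (zeroes % 8)
    for idx >= 0 {
        if uint(next.IP[idx])+uint(val) <= 255 {
            next.IP[idx] += val
            break
        }
        next.IP[idx] += val // wraps around
        val = 1             // carry into the previous byte
        idx--
    }
    return next
}

func main() {
    _, n, _ := net.ParseCIDR("10.89.0.0/24")
    for i := 0; i < 3; i++ {
        n = nextSubnet(n)
        fmt.Println(n) // 10.89.1.0/24, then 10.89.2.0/24, then 10.89.3.0/24
    }
}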
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go
new file mode 100644
index 00000000000..6b76a700b79
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go
@@ -0,0 +1,131 @@
+package util
+
+import (
+    "errors"
+    "fmt"
+    "net"
+
+    "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/pkg/config"
+    "github.com/containers/common/pkg/util"
+    "github.com/sirupsen/logrus"
+)
+
+// GetBridgeInterfaceNames returns all bridge interface names
+// already used by network configs
+func GetBridgeInterfaceNames(n NetUtil) []string {
+    names := make([]string, 0, n.Len())
+    n.ForEach(func(net types.Network) {
+        if net.Driver == types.BridgeNetworkDriver {
+            names = append(names, net.NetworkInterface)
+        }
+    })
+    return names
+}
+
+// GetUsedNetworkNames returns all network names already used
+// by network configs
+func GetUsedNetworkNames(n NetUtil) []string {
+    names := make([]string, 0, n.Len())
+    n.ForEach(func(net types.Network) {
+        names = append(names, net.Name)
+    })
+    return names
+}
+
+// GetFreeDeviceName returns a free device name which can
+// be used for new configs as name and bridge interface name.
+// The base name is suffixed by a number
+func GetFreeDeviceName(n NetUtil) (string, error) {
+    bridgeNames := GetBridgeInterfaceNames(n)
+    netNames := GetUsedNetworkNames(n)
+    liveInterfaces, err := GetLiveNetworkNames()
+    if err != nil {
+        return "", err
+    }
+    names := make([]string, 0, len(bridgeNames)+len(netNames)+len(liveInterfaces))
+    names = append(names, bridgeNames...)
+    names = append(names, netNames...)
+    names = append(names, liveInterfaces...)
+    // FIXME: Is a limit fine?
+    // Start by 1, 0 is reserved for the default network
+    for i := 1; i < 1000000; i++ {
+        deviceName := fmt.Sprintf("%s%d", n.DefaultInterfaceName(), i)
+        if !util.StringInSlice(deviceName, names) {
+            logrus.Debugf("found free device name %s", deviceName)
+            return deviceName, nil
+        }
+    }
+    return "", errors.New("could not find free device name, too many iterations")
+}
+
+// GetUsedSubnets returns a list of all used subnets by network
+// configs and interfaces on the host.
+func GetUsedSubnets(n NetUtil) ([]*net.IPNet, error) {
+    // first, load all used subnets from network configs
+    subnets := make([]*net.IPNet, 0, n.Len())
+    n.ForEach(func(n types.Network) {
+        for i := range n.Subnets {
+            subnets = append(subnets, &n.Subnets[i].Subnet.IPNet)
+        }
+    })
+    // second, load networks from the current system
+    liveSubnets, err := getLiveNetworkSubnets()
+    if err != nil {
+        return nil, err
+    }
+    return append(subnets, liveSubnets...), nil
+}
+
+// GetFreeIPv4NetworkSubnet returns an unused ipv4 subnet
+func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) (*types.Subnet, error) {
+    var err error
+    for _, pool := range subnetPools {
+        // make sure to copy the netip to prevent overwriting the subnet pool
+        netIP := make(net.IP, net.IPv4len)
+        copy(netIP, pool.Base.IP)
+        network := &net.IPNet{
+            IP:   netIP,
+            Mask: net.CIDRMask(pool.Size, 32),
+        }
+        for pool.Base.Contains(network.IP) {
+            if !NetworkIntersectsWithNetworks(network, usedNetworks) {
+                logrus.Debugf("found free ipv4 network subnet %s", network.String())
+                return &types.Subnet{
+                    Subnet: types.IPNet{IPNet: *network},
+                }, nil
+            }
+            network, err = NextSubnet(network)
+            if err != nil {
+                // on error try the next pool; we only return the error once all pools are exhausted
+                break
+            }
+        }
+    }
+
+    if err != nil {
+        return nil, err
+    }
+    return nil, errors.New("could not find free subnet from subnet pools")
+}
+
+// GetFreeIPv6NetworkSubnet returns an unused ipv6 subnet
+func GetFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) {
+    // FIXME: Is 10000 fine as limit? We should prevent an endless loop.
+    for i := 0; i < 10000; i++ {
+        // RFC 4193: Choose the ipv6 subnet randomly and NOT sequentially.
+        network, err := getRandomIPv6Subnet()
+        if err != nil {
+            return nil, err
+        }
+        if intersectsConfig := NetworkIntersectsWithNetworks(&network, usedNetworks); !intersectsConfig {
+            logrus.Debugf("found free ipv6 network subnet %s", network.String())
+            return &types.Subnet{
+                Subnet: types.IPNet{IPNet: network},
+            }, nil
+        }
+    }
+    return nil, errors.New("failed to get random ipv6 subnet")
+}
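GetFreeIPv4NetworkSubnet walks each pool in pool.Size-sized steps until a candidate clears every used subnet. A stdlib-only sketch of that search, hard-coding a 10.89.0.0/16 pool split into /24s (illustrative values, not the real config.DefaultSubnetPools):

package main

import (
    "fmt"
    "net"
)

// intersect mirrors networkIntersect above
func intersect(a, b *net.IPNet) bool { return a.Contains(b.IP) || b.Contains(a.IP) }

func main() {
    // pretend the host already uses the first /24 of the pool
    _, inUse, _ := net.ParseCIDR("10.89.0.0/24")
    used := []*net.IPNet{inUse}

    // walk the 10.89.0.0/16 pool in /24 steps
candidates:
    for i := 0; i < 256; i++ {
        c := &net.IPNet{IP: net.IPv4(10, 89, byte(i), 0).To4(), Mask: net.CIDRMask(24, 32)}
        for _, u := range used {
            if intersect(c, u) {
                continue candidates
            }
        }
        fmt.Println("free subnet:", c) // free subnet: 10.89.1.0/24
        return
    }
    fmt.Println("no free subnet in pool")
}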
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
new file mode 100644
index 00000000000..4dd44110aaa
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
@@ -0,0 +1,124 @@
+package util
+
+import (
+    "net"
+
+    "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/libnetwork/util"
+    "github.com/pkg/errors"
+)
+
+// ValidateSubnet will validate a given Subnet. It checks if the
+// given gateway and lease range are part of this subnet. If the
+// gateway is empty and addGateway is true it will get the first
+// available ip in the subnet assigned.
+func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) error {
+    if s == nil {
+        return errors.New("subnet is nil")
+    }
+    if s.Subnet.IP == nil {
+        return errors.New("subnet ip is nil")
+    }
+
+    // Reparse to ensure the subnet is valid.
+    // Do not use types.ParseCIDR() because we want the ip to be
+    // the network address and not a random ip in the subnet.
+    _, n, err := net.ParseCIDR(s.Subnet.String())
+    if err != nil {
+        return errors.Wrap(err, "subnet invalid")
+    }
+
+    // check that the new subnet does not conflict with existing ones
+    if NetworkIntersectsWithNetworks(n, usedNetworks) {
+        return errors.Errorf("subnet %s is already used on the host or by another config", n.String())
+    }
+
+    s.Subnet = types.IPNet{IPNet: *n}
+    if s.Gateway != nil {
+        if !s.Subnet.Contains(s.Gateway) {
+            return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet)
+        }
+        util.NormalizeIP(&s.Gateway)
+    } else if addGateway {
+        ip, err := util.FirstIPInSubnet(n)
+        if err != nil {
+            return err
+        }
+        s.Gateway = ip
+    }
+
+    if s.LeaseRange != nil {
+        if s.LeaseRange.StartIP != nil {
+            if !s.Subnet.Contains(s.LeaseRange.StartIP) {
+                return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet)
+            }
+            util.NormalizeIP(&s.LeaseRange.StartIP)
+        }
+        if s.LeaseRange.EndIP != nil {
+            if !s.Subnet.Contains(s.LeaseRange.EndIP) {
+                return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet)
+            }
+            util.NormalizeIP(&s.LeaseRange.EndIP)
+        }
+    }
+    return nil
+}
+
+// ValidateSubnets will validate the subnets for this network.
+// It also sets the gateway if the gateway is empty and addGateway is set to true,
+// and sets IPv6Enabled to true if at least one subnet is ipv6.
+func ValidateSubnets(network *types.Network, addGateway bool, usedNetworks []*net.IPNet) error {
+    for i := range network.Subnets {
+        err := ValidateSubnet(&network.Subnets[i], addGateway, usedNetworks)
+        if err != nil {
+            return err
+        }
+        if util.IsIPv6(network.Subnets[i].Subnet.IP) {
+            network.IPv6Enabled = true
+        }
+    }
+    return nil
+}
+
+func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOptions) error {
+    if namespacePath == "" {
+        return errors.New("namespacePath is empty")
+    }
+    if options.ContainerID == "" {
+        return errors.New("ContainerID is empty")
+    }
+    if len(options.Networks) == 0 {
+        return errors.New("must specify at least one network")
+    }
+    for name, netOpts := range options.Networks {
+        netOpts := netOpts
+        network, err := n.Network(name)
+        if err != nil {
+            return err
+        }
+        err = validatePerNetworkOpts(network, &netOpts)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// validatePerNetworkOpts checks that all given static ips are in a subnet on this network
+func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error {
+    if netOpts.InterfaceName == "" {
+        return errors.Errorf("interface name on network %s is empty", network.Name)
+    }
+    if network.IPAMOptions[types.Driver] == types.HostLocalIPAMDriver {
+    outer:
+        for _, ip := range netOpts.StaticIPs {
+            for _, s := range network.Subnets {
+                if s.Subnet.Contains(ip) {
+                    continue outer
+                }
+            }
+            return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name)
+        }
+    }
+    return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
new file mode 100644
index 00000000000..0bb0539b294
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -0,0 +1,280 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+import (
+    "encoding/json"
+    "net"
+    "os"
+    "path/filepath"
+    "time"
+
+    internalutil "github.com/containers/common/libnetwork/internal/util"
+    "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/pkg/util"
+    "github.com/containers/storage/pkg/stringid"
+    "github.com/pkg/errors"
+)
+
+// NetworkCreate will take a partially filled Network and fill the
+// missing fields. It creates the Network and returns the full Network.
+func (n *netavarkNetwork) NetworkCreate(net types.Network) (types.Network, error) {
+    n.lock.Lock()
+    defer n.lock.Unlock()
+    err := n.loadNetworks()
+    if err != nil {
+        return types.Network{}, err
+    }
+    network, err := n.networkCreate(&net, false)
+    if err != nil {
+        return types.Network{}, err
+    }
+    // add the new network to the map
+    n.networks[network.Name] = network
+    return *network, nil
+}
+
+func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*types.Network, error) {
+    // if no driver is set use the default one
+    if newNetwork.Driver == "" {
+        newNetwork.Driver = types.DefaultNetworkDriver
+    }
+    if !defaultNet {
+        // FIXME: Should we use a different type for network create without the ID field?
+        // the caller is not allowed to set a specific ID
+        if newNetwork.ID != "" {
+            return nil, errors.Wrap(types.ErrInvalidArg, "ID cannot be set for network create")
+        }
+
+        // generate random network ID
+        var i int
+        for i = 0; i < 1000; i++ {
+            id := stringid.GenerateNonCryptoID()
+            if _, err := n.getNetwork(id); err != nil {
+                newNetwork.ID = id
+                break
+            }
+        }
+        if i == 1000 {
+            return nil, errors.New("failed to create random network ID")
+        }
+    }
+
+    err := internalutil.CommonNetworkCreate(n, newNetwork)
+    if err != nil {
+        return nil, err
+    }
+
+    err = validateIPAMDriver(newNetwork)
+    if err != nil {
+        return nil, err
+    }
+
+    // Only get the used networks for validation if we do not create the default network.
+    // The default network should not be validated against used subnets; we have to ensure
+    // that this network can always be created even when a subnet is already used on the host.
+    // This could happen if you run a container on this net, then the cni interface will be
+    // created on the host and "block" this subnet from being used again.
+    // Therefore the next podman command tries to create the default net again and it would
+    // fail because it thinks the network is used on the host.
+    var usedNetworks []*net.IPNet
+    if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver {
+        usedNetworks, err = internalutil.GetUsedSubnets(n)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    switch newNetwork.Driver {
+    case types.BridgeNetworkDriver:
+        err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools)
+        if err != nil {
+            return nil, err
+        }
+        // validate the given options; we do not need them but just check to make sure they are valid
+        for key, value := range newNetwork.Options {
+            switch key {
+            case "mtu":
+                _, err = internalutil.ParseMTU(value)
+                if err != nil {
+                    return nil, err
+                }
+
+            case "vlan":
+                _, err = internalutil.ParseVlan(value)
+                if err != nil {
+                    return nil, err
+                }
+
+            default:
+                return nil, errors.Errorf("unsupported bridge network option %s", key)
+            }
+        }
+    case types.MacVLANNetworkDriver:
+        err = createMacvlan(newNetwork)
+        if err != nil {
+            return nil, err
+        }
+    default:
+        return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver)
+    }
+
+    // when we do not have ipam we must disable dns
+    internalutil.IpamNoneDisableDns(newNetwork)
+
+    // add gateway when not internal or dns enabled
+    addGateway := !newNetwork.Internal || newNetwork.DNSEnabled
+    err = internalutil.ValidateSubnets(newNetwork, addGateway, usedNetworks)
+    if err != nil {
+        return nil, err
+    }
+
+    newNetwork.Created = time.Now()
+
+    if !defaultNet {
+        confPath := filepath.Join(n.networkConfigDir, newNetwork.Name+".json")
+        f, err := os.Create(confPath)
+        if err != nil {
+            return nil, err
+        }
+        defer f.Close()
+        enc := json.NewEncoder(f)
+        enc.SetIndent("", " ")
+        err = enc.Encode(newNetwork)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return newNetwork, nil
+}
+
+func createMacvlan(network *types.Network) error {
+    if network.NetworkInterface != "" {
+        interfaceNames, err := internalutil.GetLiveNetworkNames()
+        if err != nil {
+            return err
+        }
+        if !util.StringInSlice(network.NetworkInterface, interfaceNames) {
+            return errors.Errorf("parent interface %s does not exist", network.NetworkInterface)
+        }
+    }
+
+    // we already validated the drivers before, so we just have to set the default here
+    switch network.IPAMOptions[types.Driver] {
+    case "":
+        if len(network.Subnets) == 0 {
+            return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark")
+        }
+        network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
+    case types.HostLocalIPAMDriver:
+        if len(network.Subnets) == 0 {
+            return errors.Errorf("macvlan driver needs at least one subnet specified when the host-local ipam driver is set")
+        }
+    }
+
+    // validate the given options; we do not need them but just check to make sure they are valid
+    for key, value := range network.Options {
+        switch key {
+        case "mode":
+            if !util.StringInSlice(value, types.ValidMacVLANModes) {
+                return errors.Errorf("unknown macvlan mode %q", value)
+            }
+        case "mtu":
+            _, err := internalutil.ParseMTU(value)
+            if err != nil {
+                return err
+            }
+        default:
+            return errors.Errorf("unsupported macvlan network option %s", key)
+        }
+    }
+    return nil
+}
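The ID-retry loop in networkCreate above depends on looking each candidate up before use. The same pattern in isolation (stringid is the containers/storage package the file already imports; the existing-ID map is a toy stand-in for n.getNetwork):

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/stringid"
)

func main() {
    existing := map[string]bool{} // pretend these are the known network IDs
    var id string
    for i := 0; i < 1000; i++ {
        id = stringid.GenerateNonCryptoID()
        if !existing[id] { // only keep an ID that does not collide
            break
        }
    }
    fmt.Println("new network ID:", id)
}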
+// NetworkRemove will remove the Network with the given name or ID.
+// It does not ensure that the network is unused.
+func (n *netavarkNetwork) NetworkRemove(nameOrID string) error {
+    n.lock.Lock()
+    defer n.lock.Unlock()
+    err := n.loadNetworks()
+    if err != nil {
+        return err
+    }
+
+    network, err := n.getNetwork(nameOrID)
+    if err != nil {
+        return err
+    }
+
+    // Removing the default network is not allowed.
+    if network.Name == n.defaultNetwork {
+        return errors.Errorf("default network %s cannot be removed", n.defaultNetwork)
+    }
+
+    file := filepath.Join(n.networkConfigDir, network.Name+".json")
+    // make sure to not error for ErrNotExist
+    if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {
+        return err
+    }
+    delete(n.networks, network.Name)
+    return nil
+}
+
+// NetworkList will return all known Networks. Optionally you can
+// supply a list of filter functions. A network is only returned if
+// it matches all filter functions.
+func (n *netavarkNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) {
+    n.lock.Lock()
+    defer n.lock.Unlock()
+    err := n.loadNetworks()
+    if err != nil {
+        return nil, err
+    }
+
+    networks := make([]types.Network, 0, len(n.networks))
+outer:
+    for _, net := range n.networks {
+        for _, filter := range filters {
+            // All filters have to match; if one does not match we can skip to the next network.
+            if !filter(*net) {
+                continue outer
+            }
+        }
+        networks = append(networks, *net)
+    }
+    return networks, nil
+}
+
+// NetworkInspect will return the Network with the given name or ID.
+func (n *netavarkNetwork) NetworkInspect(nameOrID string) (types.Network, error) {
+    n.lock.Lock()
+    defer n.lock.Unlock()
+    err := n.loadNetworks()
+    if err != nil {
+        return types.Network{}, err
+    }
+
+    network, err := n.getNetwork(nameOrID)
+    if err != nil {
+        return types.Network{}, err
+    }
+    return *network, nil
+}
+
+func validateIPAMDriver(n *types.Network) error {
+    ipamDriver := n.IPAMOptions[types.Driver]
+    switch ipamDriver {
+    case "", types.HostLocalIPAMDriver:
+    case types.NoneIPAMDriver:
+        if len(n.Subnets) > 0 {
+            return errors.New("none ipam driver is set but subnets are given")
+        }
+    case types.DHCPIPAMDriver:
+        return errors.New("dhcp ipam driver is not yet supported with netavark")
+    default:
+        return errors.Errorf("unsupported ipam driver %q", ipamDriver)
+    }
+    return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go
new file mode 100644
index 00000000000..29a7b4f2a58
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go
@@ -0,0 +1,6 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+const defaultBridgeName = "podman"
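NetworkList's filter contract above (every filter must accept the network) composes nicely. A self-contained sketch with local stand-ins for types.Network and types.FilterFunc (assumption: the real FilterFunc is likewise a func(Network) bool):

package main

import "fmt"

type Network struct {
    Name   string
    Labels map[string]string
}

type FilterFunc func(Network) bool

// list mirrors NetworkList's outer/inner loop: skip any network that
// fails at least one filter.
func list(all []Network, filters ...FilterFunc) []Network {
    var out []Network
outer:
    for _, n := range all {
        for _, f := range filters {
            if !f(n) {
                continue outer
            }
        }
        out = append(out, n)
    }
    return out
}

func main() {
    all := []Network{
        {Name: "podman"},
        {Name: "web", Labels: map[string]string{"env": "prod"}},
    }
    byLabel := func(n Network) bool { return n.Labels["env"] == "prod" }
    fmt.Println(list(all, byLabel)) // [{web map[env:prod]}]
}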
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
new file mode 100644
index 00000000000..ac87c5438be
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
@@ -0,0 +1,162 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+import (
+    "encoding/json"
+    "errors"
+    "io"
+    "os"
+    "os/exec"
+    "strconv"
+
+    "github.com/sirupsen/logrus"
+)
+
+type netavarkError struct {
+    exitCode int
+    // Set the json key to "error" so we can directly unmarshal into this struct
+    Msg string `json:"error"`
+    err error
+}
+
+func (e *netavarkError) Error() string {
+    ec := ""
+    // only add the exit code to the error message if we have at least info log level;
+    // the normal user does not need to care about the number
+    if e.exitCode > 0 && logrus.IsLevelEnabled(logrus.InfoLevel) {
+        ec = " (exit code " + strconv.Itoa(e.exitCode) + ")"
+    }
+    msg := "netavark" + ec
+    if len(e.Msg) > 0 {
+        msg += ": " + e.Msg
+    }
+    if e.err != nil {
+        msg += ": " + e.err.Error()
+    }
+    return msg
+}
+
+func (e *netavarkError) Unwrap() error {
+    return e.err
+}
+
+func newNetavarkError(msg string, err error) error {
+    return &netavarkError{
+        Msg: msg,
+        err: err,
+    }
+}
+
+// Type to implement the io.Writer interface.
+// This will write to logrus at info level.
+type logrusNetavarkWriter struct{}
+
+func (l *logrusNetavarkWriter) Write(b []byte) (int, error) {
+    logrus.Info("netavark: ", string(b))
+    return len(b), nil
+}
+
+// getRustLogEnv returns the RUST_LOG env var based on the current logrus level
+func getRustLogEnv() string {
+    level := logrus.GetLevel().String()
+    // rust env_log uses warn instead of warning
+    if level == "warning" {
+        level = "warn"
+    }
+    // the rust netlink library is very verbose,
+    // make sure to only log netavark logs
+    return "RUST_LOG=netavark=" + level
+}
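getRustLogEnv's level translation is worth seeing end to end; a runnable sketch reusing the same logic (logrus is the library the file already uses, and "warning" really is its name for the warn level):

package main

import (
    "fmt"

    "github.com/sirupsen/logrus"
)

// rustLogEnv applies the same mapping as getRustLogEnv above: take the
// logrus level name and translate "warning" to env_logger's "warn".
func rustLogEnv() string {
    level := logrus.GetLevel().String()
    if level == "warning" {
        level = "warn"
    }
    return "RUST_LOG=netavark=" + level
}

func main() {
    logrus.SetLevel(logrus.WarnLevel)
    fmt.Println(rustLogEnv()) // RUST_LOG=netavark=warn
}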
+// execNetavark will execute netavark with the given arguments.
+// It takes the path to the binary, the list of args, and an interface which is
+// marshaled to json and sent via stdin to netavark. The result interface is
+// used to unmarshal the netavark output. This can be nil.
+// All errors returned by this function should be of the type netavarkError
+// to provide a helpful error message.
+func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{}) error {
+    stdinR, stdinW, err := os.Pipe()
+    if err != nil {
+        return newNetavarkError("failed to create stdin pipe", err)
+    }
+    stdinWClosed := false
+    defer func() {
+        stdinR.Close()
+        if !stdinWClosed {
+            stdinW.Close()
+        }
+    }()
+
+    stdoutR, stdoutW, err := os.Pipe()
+    if err != nil {
+        return newNetavarkError("failed to create stdout pipe", err)
+    }
+    stdoutWClosed := false
+    defer func() {
+        stdoutR.Close()
+        if !stdoutWClosed {
+            stdoutW.Close()
+        }
+    }()
+
+    // connect stderr to the podman stderr for logging
+    var logWriter io.Writer = os.Stderr
+    if n.syslog {
+        // connect logrus to stderr as well so that the logs will be written to the syslog as well
+        logWriter = io.MultiWriter(logWriter, &logrusNetavarkWriter{})
+    }
+
+    cmd := exec.Command(n.netavarkBinary, append(n.getCommonNetavarkOptions(), args...)...)
+    // connect the pipes to stdin and stdout
+    cmd.Stdin = stdinR
+    cmd.Stdout = stdoutW
+    cmd.Stderr = logWriter
+    // set the netavark log level to the same as the podman one
+    cmd.Env = append(os.Environ(), getRustLogEnv())
+    // if we run with debug log level, also set RUST_BACKTRACE=1 so we can get the full stack trace in case of panics
+    if logrus.IsLevelEnabled(logrus.DebugLevel) {
+        cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1")
+    }
+
+    err = cmd.Start()
+    if err != nil {
+        return newNetavarkError("failed to start process", err)
+    }
+    err = json.NewEncoder(stdinW).Encode(stdin)
+    // we have to close stdinW so netavark gets the EOF and does not hang forever
+    stdinW.Close()
+    stdinWClosed = true
+    if err != nil {
+        return newNetavarkError("failed to encode stdin data", err)
+    }
+
+    dec := json.NewDecoder(stdoutR)
+
+    err = cmd.Wait()
+    // we have to close stdoutW so we can decode the json without hanging forever
+    stdoutW.Close()
+    stdoutWClosed = true
+    if err != nil {
+        exitError := &exec.ExitError{}
+        if errors.As(err, &exitError) {
+            ne := &netavarkError{}
+            // let's disallow unknown fields to make sure we do not get some unexpected stuff
+            dec.DisallowUnknownFields()
+            // this will unmarshal the error message into the error struct
+            ne.err = dec.Decode(ne)
+            ne.exitCode = exitError.ExitCode()
+            return ne
+        }
+        return newNetavarkError("unexpected failure during execution", err)
+    }
+
+    if result != nil {
+        err = dec.Decode(result)
+        if err != nil {
+            return newNetavarkError("failed to decode result", err)
+        }
+    }
+    return nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
new file mode 100644
index 00000000000..861854351d6
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
@@ -0,0 +1,371 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+import (
+    "encoding/json"
+    "fmt"
+    "net"
+
+    "github.com/containers/common/libnetwork/types"
+    "github.com/containers/common/libnetwork/util"
+    "github.com/pkg/errors"
+    "go.etcd.io/bbolt"
+)
+
+// IPAM boltdb structure:
+// Each network has its own bucket with the network name as bucket key.
+// Inside the network bucket there is an ID bucket which maps the container ID (key)
+// to a json array of ip addresses (value).
+// The network bucket also has a bucket for each subnet; the subnet is used as key.
+// Inside the subnet bucket an ip is used as key and the container ID as value.
+
+const (
+    idBucket = "ids"
+    // lastIP is used as the key to store the last allocated ip;
+    // note that this string should not be 4 or 16 bytes long
+    lastIP = "lastIP"
+)
+
+var (
+    idBucketKey = []byte(idBucket)
+    lastIPKey   = []byte(lastIP)
+)
+
+type ipamError struct {
+    msg   string
+    cause error
+}
+
+func (e *ipamError) Error() string {
+    msg := "IPAM error"
+    if e.msg != "" {
+        msg += ": " + e.msg
+    }
+    if e.cause != nil {
+        msg += ": " + e.cause.Error()
+    }
+    return msg
+}
+
+func newIPAMError(cause error, msg string, args ...interface{}) *ipamError {
+    return &ipamError{
+        msg:   fmt.Sprintf(msg, args...),
+        cause: cause,
+    }
+}
+
+// openDB will open the ipam database.
+// Note that the caller has to Close it.
+func (n *netavarkNetwork) openDB() (*bbolt.DB, error) {
+    db, err := bbolt.Open(n.ipamDBPath, 0o600, nil)
+    if err != nil {
+        return nil, newIPAMError(err, "failed to open database %s", n.ipamDBPath)
+    }
+    return db, nil
+}
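openDB hands back a plain bolt handle; the bucket layout described in the comment at the top of the file looks like this in practice. A sketch writing one allocation by hand (the path, subnet, ip, and container ID are made up; real code goes through allocIPs below):

package main

import (
    "fmt"

    "go.etcd.io/bbolt"
)

func main() {
    db, err := bbolt.Open("/tmp/ipam.db", 0o600, nil) // hypothetical path
    if err != nil {
        panic(err)
    }
    defer db.Close()

    err = db.Update(func(tx *bbolt.Tx) error {
        // network bucket -> subnet bucket: ip (key) -> container ID (value)
        netBkt, err := tx.CreateBucketIfNotExists([]byte("mynet"))
        if err != nil {
            return err
        }
        subBkt, err := netBkt.CreateBucketIfNotExists([]byte("10.89.0.0/24"))
        if err != nil {
            return err
        }
        // 4-byte key, matching the normalized ipv4 form the real code stores
        if err := subBkt.Put([]byte{10, 89, 0, 5}, []byte("someContainerID")); err != nil {
            return err
        }
        // network bucket -> "ids" bucket: container ID -> json ip list
        idsBkt, err := netBkt.CreateBucketIfNotExists([]byte("ids"))
        if err != nil {
            return err
        }
        return idsBkt.Put([]byte("someContainerID"), []byte(`["10.89.0.5"]`))
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("wrote ipam layout")
}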
+// NetworkOptions in place. When static ips are given it will validate
+// that these are free to use and will allocate them to the container.
+func (n *netavarkNetwork) allocIPs(opts *types.NetworkOptions) error {
+	db, err := n.openDB()
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bbolt.Tx) error {
+		for netName, netOpts := range opts.Networks {
+			network := n.networks[netName]
+			if network == nil {
+				return newIPAMError(nil, "could not find network %q", netName)
+			}
+
+			// check if we have to alloc ips
+			if !requiresIPAMAlloc(network) {
+				continue
+			}
+
+			// create/get network bucket
+			netBkt, err := tx.CreateBucketIfNotExists([]byte(netName))
+			if err != nil {
+				return newIPAMError(err, "failed to create/get network bucket for network %s", netName)
+			}
+
+			// requestIPs is the list of ips which should be used for this container
+			requestIPs := make([]net.IP, 0, len(network.Subnets))
+
+			for i := range network.Subnets {
+				subnetBkt, err := netBkt.CreateBucketIfNotExists([]byte(network.Subnets[i].Subnet.String()))
+				if err != nil {
+					return newIPAMError(err, "failed to create/get subnet bucket for network %s", netName)
+				}
+
+				// search for a static ip which matches the current subnet
+				// in this case the user wants this one and we should not assign a free one
+				var ip net.IP
+				for _, staticIP := range netOpts.StaticIPs {
+					if network.Subnets[i].Subnet.Contains(staticIP) {
+						ip = staticIP
+						break
+					}
+				}
+
+				// when static ip is requested for this network
+				if ip != nil {
+					// convert to 4 byte ipv4 if needed
+					util.NormalizeIP(&ip)
+					id := subnetBkt.Get(ip)
+					if id != nil {
+						return newIPAMError(nil, "requested ip address %s is already allocated to container ID %s", ip.String(), string(id))
+					}
+				} else {
+					ip, err = getFreeIPFromBucket(subnetBkt, &network.Subnets[i])
+					if err != nil {
+						return err
+					}
+					err = subnetBkt.Put(lastIPKey, ip)
+					if err != nil {
+						return newIPAMError(err, "failed to store last ip in database")
+					}
+				}
+
+				err = subnetBkt.Put(ip, []byte(opts.ContainerID))
+				if err != nil {
+					return newIPAMError(err, "failed to store ip in database")
+				}
+
+				requestIPs = append(requestIPs, ip)
+			}
+
+			idsBucket, err := netBkt.CreateBucketIfNotExists(idBucketKey)
+			if err != nil {
+				return newIPAMError(err, "failed to create/get id bucket for network %s", netName)
+			}
+
+			ipsBytes, err := json.Marshal(requestIPs)
+			if err != nil {
+				return newIPAMError(err, "failed to marshal ips")
+			}
+
+			err = idsBucket.Put([]byte(opts.ContainerID), ipsBytes)
+			if err != nil {
+				return newIPAMError(err, "failed to store ips in database")
+			}
+
+			netOpts.StaticIPs = requestIPs
+			opts.Networks[netName] = netOpts
+		}
+		return nil
+	})
+	return err
+}
+
+func getFreeIPFromBucket(bucket *bbolt.Bucket, subnet *types.Subnet) (net.IP, error) {
+	var rangeStart net.IP
+	var rangeEnd net.IP
+	if subnet.LeaseRange != nil {
+		rangeStart = subnet.LeaseRange.StartIP
+		rangeEnd = subnet.LeaseRange.EndIP
+	}
+
+	if rangeStart == nil {
+		// let's start with the first ip in the subnet
+		rangeStart = util.NextIP(subnet.Subnet.IP)
+	}
+
+	if rangeEnd == nil {
+		lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet)
+		// this error should never happen but let's check anyway to prevent panics
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to get lastIP")
+		}
+		// ipv4 uses the last ip in a subnet for broadcast so we cannot use it
+		if util.IsIPv4(lastIP) {
+			lastIP = util.PrevIP(lastIP)
+		}
+		rangeEnd = lastIP
+	}
+
+	lastIPByte := 
bucket.Get(lastIPKey) + curIP := net.IP(lastIPByte) + if curIP == nil { + curIP = rangeStart + } else { + curIP = util.NextIP(curIP) + } + + // store the start ip to make sure we know when we looped over all available ips + startIP := curIP + + for { + // skip the gateway + if subnet.Gateway != nil { + if util.Cmp(curIP, subnet.Gateway) == 0 { + curIP = util.NextIP(curIP) + continue + } + } + + // if we are at the end we need to jump back to the start ip + if util.Cmp(curIP, rangeEnd) > 0 { + if util.Cmp(rangeStart, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + curIP = rangeStart + continue + } + + // check if ip is already used by another container + // if not return it + if bucket.Get(curIP) == nil { + return curIP, nil + } + + curIP = util.NextIP(curIP) + + if util.Cmp(curIP, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + } +} + +// getAssignedIPs will read the ipam database and will fill in the used ips for this container. +// It will change the NetworkOptions in place. +func (n *netavarkNetwork) getAssignedIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.View(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + ipJSON := idBkt.Get([]byte(opts.ContainerID)) + if ipJSON == nil { + return newIPAMError(nil, "failed to get ips for container ID %s on network %s", opts.ContainerID, netName) + } + + // assignedIPs is the list of ips which should be used for this container + assignedIPs := make([]net.IP, 0, len(network.Subnets)) + + err = json.Unmarshal(ipJSON, &assignedIPs) + if err != nil { + return newIPAMError(err, "failed to unmarshal ips from database") + } + + for i := range assignedIPs { + util.NormalizeIP(&assignedIPs[i]) + } + + netOpts.StaticIPs = assignedIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +// deallocIPs will release the ips in the network options from the DB so that +// they can be reused by other containers. It expects that the network options +// are already filled with the correct ips. Use getAssignedIPs() for this. 
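+// deallocIPs is also called when the netavark teardown itself fails; otherwise
+// the addresses would stay marked as allocated in the database forever.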
+func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error {
+	db, err := n.openDB()
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bbolt.Tx) error {
+		for netName, netOpts := range opts.Networks {
+			network := n.networks[netName]
+			if network == nil {
+				return newIPAMError(nil, "could not find network %q", netName)
+			}
+
+			// check if we have to alloc ips
+			if !requiresIPAMAlloc(network) {
+				continue
+			}
+			// get network bucket
+			netBkt := tx.Bucket([]byte(netName))
+			if netBkt == nil {
+				return newIPAMError(nil, "failed to get network bucket for network %s", netName)
+			}
+
+			for _, subnet := range network.Subnets {
+				subnetBkt := netBkt.Bucket([]byte(subnet.Subnet.String()))
+				if subnetBkt == nil {
+					return newIPAMError(nil, "failed to get subnet bucket for network %s", netName)
+				}
+
+				// search for a static ip which matches the current subnet
+				// in this case the user wants this one and we should not assign a free one
+				var ip net.IP
+				for _, staticIP := range netOpts.StaticIPs {
+					if subnet.Subnet.Contains(staticIP) {
+						ip = staticIP
+						break
+					}
+				}
+				if ip == nil {
+					return newIPAMError(nil, "failed to find ip for subnet %s on network %s", subnet.Subnet.String(), netName)
+				}
+				util.NormalizeIP(&ip)
+
+				err = subnetBkt.Delete(ip)
+				if err != nil {
+					return newIPAMError(err, "failed to remove ip %s from subnet bucket for network %s", ip.String(), netName)
+				}
+			}
+
+			idBkt := netBkt.Bucket(idBucketKey)
+			if idBkt == nil {
+				return newIPAMError(nil, "failed to get id bucket for network %s", netName)
+			}
+
+			err = idBkt.Delete([]byte(opts.ContainerID))
+			if err != nil {
+				return newIPAMError(err, "failed to remove allocated ips for container ID %s on network %s", opts.ContainerID, netName)
+			}
+		}
+		return nil
+	})
+	return err
+}
+
+// requiresIPAMAlloc returns true when we have to allocate ips for this network.
+// It checks the ipam driver and whether subnets are set.
+func requiresIPAMAlloc(network *types.Network) bool {
+	// only do host allocation when driver is set to HostLocalIPAMDriver or unset
+	switch network.IPAMOptions[types.Driver] {
+	case "", types.HostLocalIPAMDriver:
+	default:
+		return false
+	}
+
+	// no subnets == no ips to assign
+	return len(network.Subnets) > 0
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
new file mode 100644
index 00000000000..8e7576a56e4
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -0,0 +1,323 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/containers/common/libnetwork/internal/util"
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type netavarkNetwork struct {
+	// networkConfigDir is the directory where the network config files are stored.
+	networkConfigDir string
+
+	// networkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config etc.
+	networkRunDir string
+
+	// networkRootless tells netavark whether this is rootless mode or rootful, "true" or "false"
+	networkRootless bool
+
+	// netavarkBinary is the path to the netavark binary.
+	netavarkBinary string
+	// aardvarkBinary is the path to the aardvark binary.
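+	// It may be empty when aardvark-dns is not installed; dns is then
+	// disabled for the networks which request it.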
+	aardvarkBinary string
+
+	// defaultNetwork is the name for the default network.
+	defaultNetwork string
+	// defaultSubnet is the default subnet for the default network.
+	defaultSubnet types.IPNet
+
+	// defaultsubnetPools contains the subnets which must be used to allocate a free subnet for network create
+	defaultsubnetPools []config.SubnetPool
+
+	// ipamDBPath is the path to the ip allocation bolt db
+	ipamDBPath string
+
+	// syslog describes whether the netavark debug output should be logged to the syslog as well.
+	// This will use logrus to do so, make sure logrus is set up to log to the syslog.
+	syslog bool
+
+	// lock is an internal lock for critical operations
+	lock lockfile.Locker
+
+	// modTime is the timestamp when the config dir was modified
+	modTime time.Time
+
+	// networks is a map with loaded networks, the key is the network name
+	networks map[string]*types.Network
+}
+
+type InitConfig struct {
+	// NetworkConfigDir is the directory where the network config files are stored.
+	NetworkConfigDir string
+
+	// NetavarkBinary is the path to the netavark binary.
+	NetavarkBinary string
+	// AardvarkBinary is the path to the aardvark binary.
+	AardvarkBinary string
+
+	// NetworkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config
+	NetworkRunDir string
+
+	// DefaultNetwork is the name for the default network.
+	DefaultNetwork string
+	// DefaultSubnet is the default subnet for the default network.
+	DefaultSubnet string
+
+	// DefaultsubnetPools contains the subnets which must be used to allocate a free subnet for network create
+	DefaultsubnetPools []config.SubnetPool
+
+	// Syslog describes whether the netavark debug output should be logged to the syslog as well.
+	// This will use logrus to do so, make sure logrus is set up to log to the syslog.
+	Syslog bool
+}
+
+// NewNetworkInterface creates the ContainerNetwork interface for the netavark backend.
+// Note: The networks are not loaded from disk until a method is called.
+func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
+	// TODO: consider using a shared memory lock
+	lock, err := lockfile.GetLockfile(filepath.Join(conf.NetworkConfigDir, "netavark.lock"))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultNetworkName := conf.DefaultNetwork
+	if defaultNetworkName == "" {
+		defaultNetworkName = types.DefaultNetworkName
+	}
+
+	defaultSubnet := conf.DefaultSubnet
+	if defaultSubnet == "" {
+		defaultSubnet = types.DefaultSubnet
+	}
+	defaultNet, err := types.ParseCIDR(defaultSubnet)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse default subnet")
+	}
+
+	if err := os.MkdirAll(conf.NetworkConfigDir, 0o755); err != nil {
+		return nil, err
+	}
+
+	if err := os.MkdirAll(conf.NetworkRunDir, 0o755); err != nil {
+		return nil, err
+	}
+
+	defaultSubnetPools := conf.DefaultsubnetPools
+	if defaultSubnetPools == nil {
+		defaultSubnetPools = config.DefaultSubnetPools
+	}
+
+	n := &netavarkNetwork{
+		networkConfigDir:   conf.NetworkConfigDir,
+		networkRunDir:      conf.NetworkRunDir,
+		netavarkBinary:     conf.NetavarkBinary,
+		aardvarkBinary:     conf.AardvarkBinary,
+		networkRootless:    unshare.IsRootless(),
+		ipamDBPath:         filepath.Join(conf.NetworkRunDir, "ipam.db"),
+		defaultNetwork:     defaultNetworkName,
+		defaultSubnet:      defaultNet,
+		defaultsubnetPools: defaultSubnetPools,
+		lock:               lock,
+		syslog:             conf.Syslog,
+	}
+
+	return n, nil
+}
+
+// Drivers will return the list of supported network drivers
+// for this interface.
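+// For the netavark backend these are currently "bridge" and "macvlan".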
+func (n *netavarkNetwork) Drivers() []string {
+	return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver}
+}
+
+// DefaultNetworkName will return the default netavark network name.
+func (n *netavarkNetwork) DefaultNetworkName() string {
+	return n.defaultNetwork
+}
+
+func (n *netavarkNetwork) loadNetworks() error {
+	// check the mod time of the config dir
+	f, err := os.Stat(n.networkConfigDir)
+	if err != nil {
+		return err
+	}
+	modTime := f.ModTime()
+
+	// skip loading networks if they are already loaded and
+	// if the config dir was not modified since the last call
+	if n.networks != nil && modTime.Equal(n.modTime) {
+		return nil
+	}
+	// make sure to remove all networks before we reload them
+	n.networks = nil
+	n.modTime = modTime
+
+	files, err := ioutil.ReadDir(n.networkConfigDir)
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return err
+	}
+
+	networks := make(map[string]*types.Network, len(files))
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		if filepath.Ext(f.Name()) != ".json" {
+			continue
+		}
+
+		path := filepath.Join(n.networkConfigDir, f.Name())
+		file, err := os.Open(path)
+		if err != nil {
+			// do not log ENOENT errors
+			if !errors.Is(err, os.ErrNotExist) {
+				logrus.Warnf("Error loading network config file %q: %v", path, err)
+			}
+			continue
+		}
+		network := new(types.Network)
+		err = json.NewDecoder(file).Decode(network)
+		if err != nil {
+			logrus.Warnf("Error reading network config file %q: %v", path, err)
+			continue
+		}
+
+		// check that the filename matches the network name
+		if network.Name+".json" != f.Name() {
+			logrus.Warnf("Network config name %q does not match file name %q, skipping", network.Name, f.Name())
+			continue
+		}
+
+		if !types.NameRegex.MatchString(network.Name) {
+			logrus.Warnf("Network config %q has invalid name: %q, skipping: %v", path, network.Name, types.RegexError)
+			continue
+		}
+
+		err = parseNetwork(network)
+		if err != nil {
+			logrus.Warnf("Network config %q could not be parsed, skipping: %v", path, err)
+			continue
+		}
+
+		logrus.Debugf("Successfully loaded network %s: %v", network.Name, network)
+		networks[network.Name] = network
+	}
+
+	// create the default network in memory if it did not exist on disk
+	if networks[n.defaultNetwork] == nil {
+		networkInfo, err := n.createDefaultNetwork()
+		if err != nil {
+			return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork)
+		}
+		networks[n.defaultNetwork] = networkInfo
+	}
+	logrus.Debugf("Successfully loaded %d networks", len(networks))
+	n.networks = networks
+	return nil
+}
+
+func parseNetwork(network *types.Network) error {
+	if network.Labels == nil {
+		network.Labels = map[string]string{}
+	}
+	if network.Options == nil {
+		network.Options = map[string]string{}
+	}
+	if network.IPAMOptions == nil {
+		network.IPAMOptions = map[string]string{}
+	}
+
+	if len(network.ID) != 64 {
+		return errors.Errorf("invalid network ID %q", network.ID)
+	}
+
+	// add gateway when not internal or dns enabled
+	addGateway := !network.Internal || network.DNSEnabled
+	return util.ValidateSubnets(network, addGateway, nil)
+}
+
+func (n *netavarkNetwork) createDefaultNetwork() (*types.Network, error) {
+	net := types.Network{
+		Name:             n.defaultNetwork,
+		NetworkInterface: defaultBridgeName + "0",
+		// Important: do not change this ID
+		ID:     "2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9",
+		Driver: types.BridgeNetworkDriver,
+		Subnets: []types.Subnet{
+			{Subnet: n.defaultSubnet},
+		},
+	}
+	return n.networkCreate(&net, true)
+}
+
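+// To illustrate the loading rules above, a minimal config file that
+// loadNetworks would accept as "mynet.json" could look roughly like this
+// (illustrative only; the name, interface and subnet are made up, and the
+// id must be exactly 64 characters):
+//
+//	{
+//	  "name": "mynet",
+//	  "id": "<64 characters>",
+//	  "driver": "bridge",
+//	  "network_interface": "podman1",
+//	  "subnets": [{"subnet": "10.89.0.0/24", "gateway": "10.89.0.1"}]
+//	}
+//
+// The file name must match the "name" field, otherwise the network is
+// skipped with a warning (see parseNetwork and loadNetworks above).
+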
+// getNetwork will lookup a network by name or ID. It returns an
+// error when no network was found or when more than one network
+// with the given (partial) ID exists.
+// getNetwork will read from the networks map, therefore the caller
+// must ensure that n.lock is locked before using it.
+func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) {
+	// fast path check the map key, this will only work for names
+	if val, ok := n.networks[nameOrID]; ok {
+		return val, nil
+	}
+	// If there was no match we might have gotten a full or partial ID.
+	var net *types.Network
+	for _, val := range n.networks {
+		// This should not happen because we already looked up the map by name but check anyway.
+		if val.Name == nameOrID {
+			return val, nil
+		}
+
+		if strings.HasPrefix(val.ID, nameOrID) {
+			if net != nil {
+				return nil, errors.Errorf("more than one result for network ID %s", nameOrID)
+			}
+			net = val
+		}
+	}
+	if net != nil {
+		return net, nil
+	}
+	return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID)
+}
+
+// Implement the NetUtil interface for easy code sharing with other network interfaces.
+
+// ForEach calls the given function for each network
+func (n *netavarkNetwork) ForEach(run func(types.Network)) {
+	for _, val := range n.networks {
+		run(*val)
+	}
+}
+
+// Len returns the number of networks
+func (n *netavarkNetwork) Len() int {
+	return len(n.networks)
+}
+
+// DefaultInterfaceName returns the default cni bridge name, must be suffixed with a number.
+func (n *netavarkNetwork) DefaultInterfaceName() string {
+	return defaultBridgeName
+}
+
+func (n *netavarkNetwork) Network(nameOrID string) (*types.Network, error) {
+	network, err := n.getNetwork(nameOrID)
+	if err != nil {
+		return nil, err
+	}
+	return network, nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go
new file mode 100644
index 00000000000..c5aa181fd32
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go
@@ -0,0 +1,143 @@
+//go:build linux
+// +build linux
+
+package netavark
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/containers/common/libnetwork/internal/util"
+	"github.com/containers/common/libnetwork/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type netavarkOptions struct {
+	types.NetworkOptions
+	Networks map[string]*types.Network `json:"network_info"`
+}
+
+// Setup will setup the container network namespace. It returns
+// a map of StatusBlocks, the key is the network name.
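+// Setup also allocates the ips for the container in the ipam db, preferring
+// the requested static ips when they are set.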
+func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	err := n.loadNetworks()
+	if err != nil {
+		return nil, err
+	}
+
+	err = util.ValidateSetupOptions(n, namespacePath, options)
+	if err != nil {
+		return nil, err
+	}
+
+	// allocate IPs in the IPAM db
+	err = n.allocIPs(&options.NetworkOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	netavarkOpts, err := n.convertNetOpts(options.NetworkOptions)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to convert net opts")
+	}
+
+	// Warn users if one or more networks have dns enabled
+	// but the aardvark-dns binary is not configured
+	for _, network := range netavarkOpts.Networks {
+		if network != nil && network.DNSEnabled && n.aardvarkBinary == "" {
+			// this is not a fatal error; we can still use the container without dns
+			logrus.Warnf("aardvark-dns binary not found, container dns will not be enabled")
+			break
+		}
+	}
+
+	// trace output to get the json
+	if logrus.IsLevelEnabled(logrus.TraceLevel) {
+		b, err := json.Marshal(&netavarkOpts)
+		if err != nil {
+			return nil, err
+		}
+		// show the full netavark command so we can easily reproduce errors from the cli
+		logrus.Tracef("netavark command: printf '%s' | %s setup %s", string(b), n.netavarkBinary, namespacePath)
+	}
+
+	result := map[string]types.StatusBlock{}
+	err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result)
+	if err != nil {
+		// let's dealloc the ips to prevent leaking them
+		if err := n.deallocIPs(&options.NetworkOptions); err != nil {
+			logrus.Error(err)
+		}
+		return nil, err
+	}
+
+	// make sure that the result makes sense
+	if len(result) != len(options.Networks) {
+		logrus.Errorf("unexpected netavark result: %v", result)
+		return nil, fmt.Errorf("unexpected netavark result length, want (%d), got (%d) networks", len(options.Networks), len(result))
+	}
+
+	return result, err
+}
+
+// Teardown will teardown the container network namespace.
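+// Teardown also releases the ips which were allocated for this container in
+// the ipam db, even when the netavark call itself fails.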
+func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownOptions) error {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	err := n.loadNetworks()
+	if err != nil {
+		return err
+	}
+
+	// get IPs from the IPAM db
+	err = n.getAssignedIPs(&options.NetworkOptions)
+	if err != nil {
+		// when there is an error getting the ips we should still continue
+		// to call teardown for netavark to prevent leaking network interfaces
+		logrus.Error(err)
+	}
+
+	netavarkOpts, err := n.convertNetOpts(options.NetworkOptions)
+	if err != nil {
+		return errors.Wrap(err, "failed to convert net opts")
+	}
+
+	retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil)
+
+	// when netavark returned an error we still free the used ips,
+	// otherwise we could end up in a state where we block the ips forever
+	err = n.deallocIPs(&netavarkOpts.NetworkOptions)
+	if err != nil {
+		if retErr != nil {
+			logrus.Error(err)
+		} else {
+			retErr = err
+		}
+	}
+
+	return retErr
+}
+
+func (n *netavarkNetwork) getCommonNetavarkOptions() []string {
+	return []string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "--aardvark-binary=" + n.aardvarkBinary}
+}
+
+func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOptions, error) {
+	netavarkOptions := netavarkOptions{
+		NetworkOptions: opts,
+		Networks:       make(map[string]*types.Network, len(opts.Networks)),
+	}
+
+	for network := range opts.Networks {
+		net, err := n.getNetwork(network)
+		if err != nil {
+			return nil, err
+		}
+		netavarkOptions.Networks[network] = net
+	}
+	return &netavarkOptions, nil
+}
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go
new file mode 100644
index 00000000000..893bdea2ef3
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/network/interface.go
@@ -0,0 +1,202 @@
+//go:build linux
+// +build linux
+
+package network
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/common/libnetwork/cni"
+	"github.com/containers/common/libnetwork/netavark"
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/common/pkg/machine"
+	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/homedir"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// defaultNetworkBackendFileName is the file name for the sentinel file to store the backend
+	defaultNetworkBackendFileName = "defaultNetworkBackend"
+	// cniConfigDir is the directory where cni configuration is found
+	cniConfigDir = "/etc/cni/net.d/"
+	// cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins
+	cniConfigDirRootless = "cni/net.d/"
+	// netavarkConfigDir is the config directory for the rootful network files
+	netavarkConfigDir = "/etc/containers/networks"
+	// netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db
+	netavarkRunDir = "/run/containers/networks"
+
+	// netavarkBinary is the name of the netavark binary
+	netavarkBinary = "netavark"
+	// aardvarkBinary is the name of the aardvark binary
+	aardvarkBinary = "aardvark-dns"
+)
+
+// NetworkBackend returns the network backend name and interface.
+// It returns either the CNI or netavark backend depending on what is set in the config.
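+// A hypothetical caller could look like this (illustrative only; store and
+// conf must come from the caller's own storage and config setup):
+//
+//	backend, netInt, err := network.NetworkBackend(store, conf, false)
+//	if err == nil && backend == types.Netavark {
+//		nets, _ := netInt.NetworkList()
+//		logrus.Debugf("netavark manages %d networks", len(nets))
+//	}
+//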
+// If the backend is set to "" we will automatically assign the backend based on the following conditions:
+// 1. read ${graphroot}/defaultNetworkBackend
+// 2. find the netavark binary (if not installed use CNI)
+// 3. check containers, images and CNI networks; if there are some we have an existing install and should continue to use CNI
+func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) {
+	backend := types.NetworkBackend(conf.Network.NetworkBackend)
+	if backend == "" {
+		var err error
+		backend, err = defaultNetworkBackend(store, conf)
+		if err != nil {
+			return "", nil, fmt.Errorf("failed to get default network backend: %w", err)
+		}
+	}
+
+	switch backend {
+	case types.Netavark:
+		netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false)
+		if err != nil {
+			return "", nil, err
+		}
+
+		aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false)
+
+		confDir := conf.Network.NetworkConfigDir
+		if confDir == "" {
+			confDir = getDefaultNetavarkConfigDir(store)
+		}
+
+		// We cannot use the runroot for rootful since the network namespace is shared for all
+		// libpod instances; they also have to share the same ipam db.
+		// For rootless we have our own network namespace per libpod instance,
+		// so this is not a problem there.
+		runDir := netavarkRunDir
+		if unshare.IsRootless() {
+			runDir = filepath.Join(store.RunRoot(), "networks")
+		}
+
+		netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{
+			NetworkConfigDir:   confDir,
+			NetworkRunDir:      runDir,
+			NetavarkBinary:     netavarkBin,
+			AardvarkBinary:     aardvarkBin,
+			DefaultNetwork:     conf.Network.DefaultNetwork,
+			DefaultSubnet:      conf.Network.DefaultSubnet,
+			DefaultsubnetPools: conf.Network.DefaultSubnetPools,
+			Syslog:             syslog,
+		})
+		return types.Netavark, netInt, err
+	case types.CNI:
+		netInt, err := getCniInterface(conf)
+		return types.CNI, netInt, err
+
+	default:
+		return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend)
+	}
+}
+
+func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) {
+	// read the defaultNetworkBackend file
+	file := filepath.Join(store.GraphRoot(), defaultNetworkBackendFileName)
+	b, err := ioutil.ReadFile(file)
+	if err == nil {
+		val := string(b)
+		if val == string(types.Netavark) {
+			return types.Netavark, nil
+		}
+		if val == string(types.CNI) {
+			return types.CNI, nil
+		}
+		return "", fmt.Errorf("unknown network backend value %q in %q", val, file)
+	}
+	// fail for all errors except ENOENT
+	if !errors.Is(err, os.ErrNotExist) {
+		return "", fmt.Errorf("could not read network backend value: %w", err)
+	}
+
+	// cache the network backend to make sure the same one will always be used
+	defer func() {
+		// only write when there is no error
+		if err == nil {
+			if err := ioutils.AtomicWriteFile(file, []byte(backend), 0o644); err != nil {
+				logrus.Errorf("could not write network backend to file: %v", err)
+			}
+		}
+	}()
+
+	_, err = conf.FindHelperBinary("netavark", false)
+	if err != nil {
+		// if we cannot find netavark use CNI
+		return types.CNI, nil
+	}
+
+	// now check if there are already containers, images and CNI networks (new install?)
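+	// only a completely fresh install, with no containers, no images and at
+	// most the default CNI network, is switched to netavark automatically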
+	cons, err := store.Containers()
+	if err != nil {
+		return "", err
+	}
+	if len(cons) == 0 {
+		imgs, err := store.Images()
+		if err != nil {
+			return "", err
+		}
+		if len(imgs) == 0 {
+			cniInterface, err := getCniInterface(conf)
+			if err == nil {
+				nets, err := cniInterface.NetworkList()
+				// there is always a default network so check <= 1
+				if err == nil && len(nets) <= 1 {
+					// we have a fresh system so use netavark
+					return types.Netavark, nil
+				}
+			}
+		}
+	}
+	return types.CNI, nil
+}
+
+func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) {
+	confDir := conf.Network.NetworkConfigDir
+	if confDir == "" {
+		var err error
+		confDir, err = getDefultCNIConfigDir()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return cni.NewCNINetworkInterface(&cni.InitConfig{
+		CNIConfigDir:       confDir,
+		CNIPluginDirs:      conf.Network.CNIPluginDirs,
+		DefaultNetwork:     conf.Network.DefaultNetwork,
+		DefaultSubnet:      conf.Network.DefaultSubnet,
+		DefaultsubnetPools: conf.Network.DefaultSubnetPools,
+		IsMachine:          machine.IsGvProxyBased(),
+	})
+}
+
+func getDefultCNIConfigDir() (string, error) {
+	if !unshare.IsRootless() {
+		return cniConfigDir, nil
+	}
+
+	configHome, err := homedir.GetConfigHome()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(configHome, cniConfigDirRootless), nil
+}
+
+// getDefaultNetavarkConfigDir returns the netavark config dir. For rootful it will
+// use "/etc/containers/networks" and for rootless "$graphroot/networks". We cannot
+// use the graphroot for rootful since the network namespace is shared for all
+// libpod instances.
+func getDefaultNetavarkConfigDir(store storage.Store) string {
+	if !unshare.IsRootless() {
+		return netavarkConfigDir
+	}
+	return filepath.Join(store.GraphRoot(), "networks")
+}
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
new file mode 100644
index 00000000000..a1e4d71edd8
--- /dev/null
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -0,0 +1,50 @@
+package types
+
+const (
+	// BridgeNetworkDriver defines the bridge driver
+	BridgeNetworkDriver = "bridge"
+	// DefaultNetworkDriver is the default network type used
+	DefaultNetworkDriver = BridgeNetworkDriver
+	// MacVLANNetworkDriver defines the macvlan driver
+	MacVLANNetworkDriver = "macvlan"
+	// IPVLANNetworkDriver defines the ipvlan driver
+	IPVLANNetworkDriver = "ipvlan"
+
+	// IPAM drivers
+	Driver = "driver"
+	// HostLocalIPAMDriver stores the ip locally in a db
+	HostLocalIPAMDriver = "host-local"
+	// DHCPIPAMDriver gets subnet and ip from a dhcp server
+	DHCPIPAMDriver = "dhcp"
+	// NoneIPAMDriver does not provide ipam management
+	NoneIPAMDriver = "none"
+
+	// DefaultNetworkName is the name that will be used for the default CNI network.
+	DefaultNetworkName = "podman"
+	// DefaultSubnet is the subnet that will be used for the default CNI network.
+ DefaultSubnet = "10.88.0.0/16" + + // valid macvlan driver mode values + MacVLANModeBridge = "bridge" + MacVLANModePrivate = "private" + MacVLANModeVepa = "vepa" + MacVLANModePassthru = "passthru" + + // valid ipvlan driver modes + IPVLANModeL2 = "l2" + IPVLANModeL3 = "l3" + IPVLANModeL3s = "l3s" +) + +type NetworkBackend string + +const ( + CNI NetworkBackend = "cni" + Netavark NetworkBackend = "netavark" +) + +// ValidMacVLANModes is the list of valid mode options for the macvlan driver +var ValidMacVLANModes = []string{MacVLANModeBridge, MacVLANModePrivate, MacVLANModeVepa, MacVLANModePassthru} + +// ValidIPVLANModes is the list of valid mode options for the ipvlan driver +var ValidIPVLANModes = []string{IPVLANModeL2, IPVLANModeL3, IPVLANModeL3s} diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go new file mode 100644 index 00000000000..d37e529dfe6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -0,0 +1,25 @@ +package types + +import ( + "regexp" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchNetwork indicates the requested network does not exist + ErrNoSuchNetwork = errors.New("network not found") + + // ErrInvalidArg indicates that an invalid argument was passed + ErrInvalidArg = errors.New("invalid argument") + + // ErrNetworkExists indicates that a network with the given name already + // exists. + ErrNetworkExists = errors.New("network already exists") + + // NameRegex is a regular expression to validate names. + // This must NOT be changed. + NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + // RegexError is thrown in presence of an invalid name. + RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") +) diff --git a/vendor/github.com/containers/podman/v3/libpod/network/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go similarity index 76% rename from vendor/github.com/containers/podman/v3/libpod/network/types/network.go rename to vendor/github.com/containers/common/libnetwork/types/network.go index 2fe4f3da2b8..de865537738 100644 --- a/vendor/github.com/containers/podman/v3/libpod/network/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -1,6 +1,7 @@ package types import ( + "encoding/json" "net" "time" ) @@ -27,6 +28,10 @@ type ContainerNetwork interface { // Drivers will return the list of supported network drivers // for this interface. Drivers() []string + + // DefaultNetworkName will return the default network name + // for this interface. + DefaultNetworkName() string } // Network describes the Network attributes. @@ -37,11 +42,11 @@ type Network struct { ID string `json:"id"` // Driver for this Network, e.g. bridge, macvlan... Driver string `json:"driver"` - // InterfaceName is the network interface name on the host. + // NetworkInterface is the network interface name on the host. NetworkInterface string `json:"network_interface,omitempty"` // Created contains the timestamp when this network was created. Created time.Time `json:"created,omitempty"` - // Subnets to use. + // Subnets to use for this network. Subnets []Subnet `json:"subnets,omitempty"` // IPv6Enabled if set to true an ipv6 subnet should be created for this net. 
 	IPv6Enabled bool `json:"ipv6_enabled"`
@@ -68,7 +73,7 @@ type IPNet struct {
 
 // ParseCIDR parse a string to IPNet
 func ParseCIDR(cidr string) (IPNet, error) {
-	ip, net, err := net.ParseCIDR(cidr)
+	ip, subnet, err := net.ParseCIDR(cidr)
 	if err != nil {
 		return IPNet{}, err
 	}
@@ -77,8 +82,8 @@ func ParseCIDR(cidr string) (IPNet, error) {
 	if ipv4 != nil {
 		ip = ipv4
 	}
-	net.IP = ip
-	return IPNet{*net}, err
+	subnet.IP = ip
+	return IPNet{*subnet}, err
 }
 
 func (n *IPNet) MarshalText() ([]byte, error) {
@@ -86,11 +91,56 @@ func (n *IPNet) MarshalText() ([]byte, error) {
 }
 
 func (n *IPNet) UnmarshalText(text []byte) error {
-	net, err := ParseCIDR(string(text))
+	subnet, err := ParseCIDR(string(text))
 	if err != nil {
 		return err
 	}
-	*n = net
+	*n = subnet
+	return nil
+}
+
+// HardwareAddr is the same as net.HardwareAddr except
+// that it adds the json marshal/unmarshal methods.
+// This allows us to read the mac from a json string
+// and a byte array.
+// swagger:model MacAddress
+type HardwareAddr net.HardwareAddr
+
+func (h *HardwareAddr) String() string {
+	return (*net.HardwareAddr)(h).String()
+}
+
+func (h HardwareAddr) MarshalText() ([]byte, error) {
+	return []byte(h.String()), nil
+}
+
+func (h *HardwareAddr) UnmarshalJSON(text []byte) error {
+	if len(text) == 0 {
+		*h = nil
+		return nil
+	}
+
+	// if the json string starts with a quote we got a string,
+	// unmarshal the string and parse the mac from this string
+	if string(text[0]) == `"` {
+		var macString string
+		err := json.Unmarshal(text, &macString)
+		if err == nil {
+			mac, err := net.ParseMAC(macString)
+			if err == nil {
+				*h = HardwareAddr(mac)
+				return nil
+			}
+		}
+	}
+	// not a string or we got an error, fall back to the normal parsing
+	mac := make(net.HardwareAddr, 0, 6)
+	// use the standard json unmarshal for backwards compat
+	err := json.Unmarshal(text, &mac)
+	if err != nil {
+		return err
+	}
+	*h = HardwareAddr(mac)
 	return nil
 }
@@ -131,24 +181,24 @@ type StatusBlock struct {
 
 // NetInterface contains the settings for a given network interface.
 type NetInterface struct {
-	// Networks list of assigned subnets with their gateway.
-	Networks []NetAddress `json:"networks,omitempty"`
+	// Subnets list of assigned subnets with their gateway.
+	Subnets []NetAddress `json:"subnets,omitempty"`
 	// MacAddress for this Interface.
-	MacAddress net.HardwareAddr `json:"mac_address"`
+	MacAddress HardwareAddr `json:"mac_address"`
 }
 
-// NetAddress contains the subnet and gatway.
+// NetAddress contains the ip address, subnet and gateway.
 type NetAddress struct {
-	// Subnet of this NetAddress. Note that the subnet contains the
-	// actual ip of the net interface and not the network address.
-	Subnet IPNet `json:"subnet"`
-	// Gateway for the Subnet. This can be nil if there is no gateway, e.g. internal network.
+	// IPNet of this NetAddress. Note that this is a subnet but it has to contain the
+	// actual ip of the network interface and not the network address.
+	IPNet IPNet `json:"ipnet"`
+	// Gateway for the network. This can be empty if there is no gateway, e.g. internal network.
 	Gateway net.IP `json:"gateway,omitempty"`
 }
 
 // PerNetworkOptions are options which should be set on a per network basis.
 type PerNetworkOptions struct {
-	// StaticIPv4 for this container. Optional.
+	// StaticIPs for this container. Optional.
 	StaticIPs []net.IP `json:"static_ips,omitempty"`
 	// Aliases contains a list of names which the dns server should resolve
 	// to this container. Should only be set when DNSEnabled is true on the Network.
@@ -157,8 +207,9 @@ type PerNetworkOptions struct {
 	// Optional.
 	Aliases []string `json:"aliases,omitempty"`
 	// StaticMac for this container. Optional.
-	StaticMAC net.HardwareAddr `json:"static_mac,omitempty"`
-	// InterfaceName for this container. Required.
+	StaticMAC HardwareAddr `json:"static_mac,omitempty"`
+	// InterfaceName for this container. Required in the backend.
+	// Optional in the frontend. Will be filled with ethX (where X is an integer) when empty.
 	InterfaceName string `json:"interface_name"`
 }
 
@@ -206,7 +257,7 @@ type PortMapping struct {
 }
 
 // OCICNIPortMapping maps to the standard CNI portmapping Capability.
-// Deprecated, do not use this struct for new fields. This only exists
+// Deprecated: Do not use this struct for new fields. This only exists
 // for backwards compatibility.
 type OCICNIPortMapping struct {
 	// HostPort is the port number on the host.
diff --git a/vendor/github.com/containers/podman/v3/libpod/network/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go
similarity index 70%
rename from vendor/github.com/containers/podman/v3/libpod/network/util/filters.go
rename to vendor/github.com/containers/common/libnetwork/util/filters.go
index c3c80b352ef..58d79d25bd5 100644
--- a/vendor/github.com/containers/podman/v3/libpod/network/util/filters.go
+++ b/vendor/github.com/containers/common/libnetwork/util/filters.go
@@ -3,14 +3,15 @@ package util
 import (
 	"strings"
 
-	"github.com/containers/podman/v3/libpod/network/types"
-	"github.com/containers/podman/v3/pkg/util"
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/filters"
+	"github.com/containers/common/pkg/util"
 	"github.com/pkg/errors"
 )
 
-func GenerateNetworkFilters(filters map[string][]string) ([]types.FilterFunc, error) {
-	filterFuncs := make([]types.FilterFunc, 0, len(filters))
-	for key, filterValues := range filters {
+func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) {
+	filterFuncs := make([]types.FilterFunc, 0, len(f))
+	for key, filterValues := range f {
 		filterFunc, err := createFilterFuncs(key, filterValues)
 		if err != nil {
 			return nil, err
@@ -28,7 +29,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err
 			return util.StringMatchRegexSlice(net.Name, filterValues)
 		}, nil
 
-	case "driver":
+	case types.Driver:
 		// matches network driver
 		return func(net types.Network) bool {
 			return util.StringInSlice(net.Driver, filterValues)
@@ -45,9 +46,9 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err
 	return createPruneFilterFuncs(key, filterValues)
 }
 
-func GenerateNetworkPruneFilters(filters map[string][]string) ([]types.FilterFunc, error) {
-	filterFuncs := make([]types.FilterFunc, 0, len(filters))
-	for key, filterValues := range filters {
+func GenerateNetworkPruneFilters(f map[string][]string) ([]types.FilterFunc, error) {
+	filterFuncs := make([]types.FilterFunc, 0, len(f))
+	for key, filterValues := range f {
 		filterFunc, err := createPruneFilterFuncs(key, filterValues)
 		if err != nil {
 			return nil, err
@@ -62,11 +63,11 @@ func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc
 	case "label":
 		// matches all labels
 		return func(net types.Network) bool {
-			return util.MatchLabelFilters(filterValues, net.Labels)
+			return filters.MatchLabelFilters(filterValues, net.Labels)
 		}, nil
 
 	case "until":
-		until, err := util.ComputeUntilTimestamp(filterValues)
+		until, err := filters.ComputeUntilTimestamp(filterValues)
 		if err != nil {
 			return nil,
err } diff --git a/vendor/github.com/containers/common/libnetwork/util/ip.go b/vendor/github.com/containers/common/libnetwork/util/ip.go new file mode 100644 index 00000000000..7c315e31293 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip.go @@ -0,0 +1,56 @@ +package util + +import ( + "net" +) + +// IsIPv6 returns true if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv4 returns true if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// LastIPInSubnet gets the last IP in a subnet +func LastIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + for i := range cidr.IP { + cidr.IP[i] |= ^cidr.Mask[i] + } + return cidr.IP, nil +} + +// FirstIPInSubnet gets the first IP in a subnet +func FirstIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + cidr.IP[len(cidr.IP)-1]++ + return cidr.IP, nil +} + +// NormalizeIP will transform the given ip to the 4 byte len ipv4 if possible +func NormalizeIP(ip *net.IP) { + ipv4 := ip.To4() + if ipv4 != nil { + *ip = ipv4 + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip_calc.go b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go new file mode 100644 index 00000000000..a27ddf78bf7 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go @@ -0,0 +1,53 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1 +func NextIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Add(i, big.NewInt(1))) +} + +// PrevIP returns IP decremented by 1 +func PrevIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Sub(i, big.NewInt(1))) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +func Cmp(a, b net.IP) int { + aa := ipToInt(a) + bb := ipToInt(b) + return aa.Cmp(bb) +} + +func ipToInt(ip net.IP) *big.Int { + if v := ip.To4(); v != nil { + return big.NewInt(0).SetBytes(v) + } + return big.NewInt(0).SetBytes(ip.To16()) +} + +func intToIP(i *big.Int) net.IP { + return net.IP(i.Bytes()) +} diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 735d194932f..35f79a1adb1 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -1,3 +1,4 @@ +//go:build linux && apparmor // +build linux,apparmor package apparmor @@ -232,7 +233,6 @@ func parseAAParserVersion(output string) (int, error) { // major*10^5 + minor*10^3 + patch*10^0 numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel return numericVersion, nil - } // CheckProfileAndLoadDefault checks if the specified profile is loaded and diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go index 021e32571df..667fa9f2655 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go @@ -1,3 +1,4 @@ +//go:build linux && apparmor // +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go index 13469f1b629..dacfc2f48c2 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !apparmor // +build !linux !apparmor package apparmor diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go similarity index 99% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/blkio.go rename to vendor/github.com/containers/common/pkg/cgroups/blkio.go index bacd4eb9362..0fb61c75776 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/blkio.go +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio.go @@ -12,8 +12,7 @@ import ( "github.com/pkg/errors" ) -type blkioHandler struct { -} +type blkioHandler struct{} func getBlkioHandler() *blkioHandler { return &blkioHandler{} diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go similarity index 90% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups.go rename to vendor/github.com/containers/common/pkg/cgroups/cgroups.go index 4bb8de69b47..57997d65207 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go @@ -3,6 +3,7 @@ package cgroups import ( "bufio" "bytes" + "context" "fmt" "io/ioutil" 
"math" @@ -11,7 +12,7 @@ import ( "strconv" "strings" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/storage/pkg/unshare" systemdDbus "github.com/coreos/go-systemd/v22/dbus" "github.com/godbus/dbus/v5" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -129,22 +130,22 @@ func init() { func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { if cgroup2 { controllers := []controller{} - subtreeControl := cgroupRoot + "/cgroup.subtree_control" - // rootless cgroupv2: check available controllers for current user ,systemd or servicescope will inherit - if rootless.IsRootless() { + controllersFile := cgroupRoot + "/cgroup.controllers" + // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit + if unshare.IsRootless() { userSlice, err := getCgroupPathForCurrentProcess() if err != nil { return controllers, err } - //userSlice already contains '/' so not adding here + // userSlice already contains '/' so not adding here basePath := cgroupRoot + userSlice - subtreeControl = fmt.Sprintf("%s/cgroup.subtree_control", basePath) + controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) } - subtreeControlBytes, err := ioutil.ReadFile(subtreeControl) + controllersFileBytes, err := ioutil.ReadFile(controllersFile) if err != nil { - return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", subtreeControl) + return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile) } - for _, controllerName := range strings.Fields(string(subtreeControlBytes)) { + for _, controllerName := range strings.Fields(string(controllersFileBytes)) { c := controller{ name: controllerName, symlink: false, @@ -157,7 +158,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) subsystems, _ := cgroupV1GetAllSubsystems() controllers := []controller{} // cgroupv1 and rootless: No subsystem is available: delegation is unsafe. - if rootless.IsRootless() { + if unshare.IsRootless() { return controllers, nil } @@ -264,7 +265,7 @@ func createCgroupv2Path(path string) (deferredError error) { for i, e := range elements[3:] { current = filepath.Join(current, e) if i > 0 { - if err := os.Mkdir(current, 0755); err != nil { + if err := os.Mkdir(current, 0o755); err != nil { if !os.IsExist(err) { return err } @@ -280,7 +281,7 @@ func createCgroupv2Path(path string) (deferredError error) { // We enable the controllers for all the path components except the last one. It is not allowed to add // PIDs if there are already enabled controllers. 
if i < len(elements[3:])-1 { - if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0755); err != nil { + if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0o755); err != nil { return err } } @@ -322,7 +323,7 @@ func (c *CgroupControl) initialize() (err error) { continue } path := c.getCgroupv1Path(ctr.name) - if err := os.MkdirAll(path, 0755); err != nil { + if err := os.MkdirAll(path, 0o755); err != nil { return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name) } } @@ -342,7 +343,7 @@ func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { return false, err } - if err := os.MkdirAll(cPath, 0755); err != nil { + if err := os.MkdirAll(cPath, 0o755); err != nil { return false, errors.Wrapf(err, "error creating cgroup for %s", controller) } return true, nil @@ -364,6 +365,29 @@ func readFileAsUint64(path string) (uint64, error) { return ret, nil } +func readFileByKeyAsUint64(path, key string) (uint64, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + for _, line := range strings.Split(string(content), "\n") { + fields := strings.SplitN(line, " ", 2) + if fields[0] == key { + v := cleanString(string(fields[1])) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil + } + } + + return 0, fmt.Errorf("no key named %s from %s", key, path) +} + // New creates a new cgroup control func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) { cgroup2, err := IsCgroup2UnifiedMode() @@ -435,7 +459,7 @@ func Load(path string) (*CgroupControl, error) { // if there is no controller at all, raise an error if !oneExists { - if rootless.IsRootless() { + if unshare.IsRootless() { return nil, ErrCgroupV1Rootless } // compatible with the error code @@ -452,7 +476,7 @@ func (c *CgroupControl) CreateSystemdUnit(path string) error { return fmt.Errorf("the cgroup controller is not using systemd") } - conn, err := systemdDbus.New() + conn, err := systemdDbus.NewWithContext(context.TODO()) if err != nil { return err } @@ -508,32 +532,6 @@ func (c *CgroupControl) Delete() error { return c.DeleteByPath(c.path) } -// rmDirRecursively delete recursively a cgroup directory. -// It differs from os.RemoveAll as it doesn't attempt to unlink files. -// On cgroupfs we are allowed only to rmdir empty directories. -func rmDirRecursively(path string) error { - if err := os.Remove(path); err == nil || os.IsNotExist(err) { - return nil - } - entries, err := ioutil.ReadDir(path) - if err != nil { - return err - } - for _, i := range entries { - if i.IsDir() { - if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { - return err - } - } - } - if err := os.Remove(path); err != nil { - if !os.IsNotExist(err) { - return errors.Wrapf(err, "remove %s", path) - } - } - return nil -} - // DeleteByPathConn deletes the specified cgroup path using the specified // dbus connection if needed. 
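+// The connection is only used when the cgroup control was created with the
+// systemd backend; for cgroupfs the path is removed directly.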
func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error { @@ -565,7 +563,7 @@ func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) er // DeleteByPath deletes the specified cgroup path func (c *CgroupControl) DeleteByPath(path string) error { if c.systemd { - conn, err := systemdDbus.New() + conn, err := systemdDbus.NewWithContext(context.TODO()) if err != nil { return err } @@ -591,7 +589,7 @@ func (c *CgroupControl) AddPid(pid int) error { if c.cgroup2 { p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") - if err := ioutil.WriteFile(p, pidString, 0644); err != nil { + if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { return errors.Wrapf(err, "write %s", p) } return nil @@ -614,7 +612,7 @@ func (c *CgroupControl) AddPid(pid int) error { continue } p := filepath.Join(c.getCgroupv1Path(n), "tasks") - if err := ioutil.WriteFile(p, pidString, 0644); err != nil { + if err := ioutil.WriteFile(p, pidString, 0o644); err != nil { return errors.Wrapf(err, "write %s", p) } } diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go similarity index 65% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups_supported.go rename to vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go index fe17db7f7a3..edb28ad1807 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups_supported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroups @@ -5,11 +6,13 @@ package cgroups import ( "bufio" "fmt" + "io/ioutil" "os" "path/filepath" "strings" "sync" "syscall" + "time" "github.com/pkg/errors" "golang.org/x/sys/unix" @@ -88,3 +91,40 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { } return true, nil } + +// rmDirRecursively delete recursively a cgroup directory. +// It differs from os.RemoveAll as it doesn't attempt to unlink files. +// On cgroupfs we are allowed only to rmdir empty directories. +func rmDirRecursively(path string) error { + if err := os.Remove(path); err == nil || os.IsNotExist(err) { + return nil + } + entries, err := ioutil.ReadDir(path) + if err != nil { + return err + } + for _, i := range entries { + if i.IsDir() { + if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { + return err + } + } + } + + attempts := 0 + for { + err := os.Remove(path) + if err == nil || os.IsNotExist(err) { + return nil + } + if errors.Is(err, unix.EBUSY) { + // attempt up to 5 seconds if the cgroup is busy + if attempts < 500 { + time.Sleep(time.Millisecond * 10) + attempts++ + continue + } + } + return errors.Wrapf(err, "remove %s", path) + } +} diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go similarity index 76% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups_unsupported.go rename to vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go index cd140fbf3c9..b3dcb2d33da 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go @@ -1,7 +1,12 @@ +//go:build !linux // +build !linux package cgroups +import ( + "os" +) + // IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. 
func IsCgroup2UnifiedMode() (bool, error) { return false, nil @@ -12,3 +17,7 @@ func IsCgroup2UnifiedMode() (bool, error) { func UserOwnsCurrentSystemdCgroup() (bool, error) { return false, nil } + +func rmDirRecursively(path string) error { + return os.RemoveAll(path) +} diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go similarity index 99% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/cpu.go rename to vendor/github.com/containers/common/pkg/cgroups/cpu.go index 23539757d23..c9e94f26977 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/cpu.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpu.go @@ -12,8 +12,7 @@ import ( "github.com/pkg/errors" ) -type cpuHandler struct { -} +type cpuHandler struct{} func getCPUHandler() *cpuHandler { return &cpuHandler{} diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go similarity index 93% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/cpuset.go rename to vendor/github.com/containers/common/pkg/cgroups/cpuset.go index 46d0484f2cf..2bfeb80db5a 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/cpuset.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go @@ -10,8 +10,7 @@ import ( "github.com/pkg/errors" ) -type cpusetHandler struct { -} +type cpusetHandler struct{} func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { if dir == cgroupRoot { @@ -26,14 +25,14 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { if err != nil { return nil, errors.Wrapf(err, "open %s", path) } - if len(strings.Trim(string(data), "\n")) != 0 { + if strings.Trim(string(data), "\n") != "" { return data, nil } data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) if err != nil { return nil, err } - if err := ioutil.WriteFile(path, data, 0644); err != nil { + if err := ioutil.WriteFile(path, data, 0o644); err != nil { return nil, errors.Wrapf(err, "write %s", path) } return data, nil diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go similarity index 70% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/memory.go rename to vendor/github.com/containers/common/pkg/cgroups/memory.go index b3991f7e3ef..10d65893c6c 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/memory.go +++ b/vendor/github.com/containers/common/pkg/cgroups/memory.go @@ -7,8 +7,7 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" ) -type memHandler struct { -} +type memHandler struct{} func getMemoryHandler() *memHandler { return &memHandler{} @@ -41,22 +40,23 @@ func (c *memHandler) Stat(ctr *CgroupControl, m *Metrics) error { usage := MemoryUsage{} var memoryRoot string - filenames := map[string]string{} + var limitFilename string if ctr.cgroup2 { memoryRoot = filepath.Join(cgroupRoot, ctr.path) - filenames["usage"] = "memory.current" - filenames["limit"] = "memory.max" + limitFilename = "memory.max" + if usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil { + return err + } } else { memoryRoot = ctr.getCgroupv1Path(Memory) - filenames["usage"] = "memory.usage_in_bytes" - filenames["limit"] = "memory.limit_in_bytes" + limitFilename = "memory.limit_in_bytes" + if usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, 
"memory.usage_in_bytes")); err != nil { + return err + } } - usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, filenames["usage"])) - if err != nil { - return err - } - usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, filenames["limit"])) + + usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) if err != nil { return err } diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/pids.go rename to vendor/github.com/containers/common/pkg/cgroups/pids.go index b2bfebe4da8..650120a560f 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/pids.go +++ b/vendor/github.com/containers/common/pkg/cgroups/pids.go @@ -8,8 +8,7 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" ) -type pidHandler struct { -} +type pidHandler struct{} func getPidsHandler() *pidHandler { return &pidHandler{} @@ -29,11 +28,14 @@ func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { } p := filepath.Join(PIDRoot, "pids.max") - return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0644) + return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0o644) } // Create the cgroup func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } return ctr.createCgroupDirectory(Pids) } diff --git a/vendor/github.com/containers/podman/v3/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go similarity index 91% rename from vendor/github.com/containers/podman/v3/pkg/cgroups/systemd.go rename to vendor/github.com/containers/common/pkg/cgroups/systemd.go index f26988c5a2d..92065a2d7cd 100644 --- a/vendor/github.com/containers/podman/v3/pkg/cgroups/systemd.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd.go @@ -1,6 +1,7 @@ package cgroups import ( + "context" "fmt" "path/filepath" "strings" @@ -37,7 +38,7 @@ func systemdCreate(path string, c *systemdDbus.Conn) error { } ch := make(chan string) - _, err := c.StartTransientUnit(name, "replace", properties, ch) + _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) if err != nil { lastError = err continue @@ -70,7 +71,7 @@ func systemdDestroyConn(path string, c *systemdDbus.Conn) error { name := filepath.Base(path) ch := make(chan string) - _, err := c.StopUnit(name, "replace", ch) + _, err := c.StopUnitContext(context.TODO(), name, "replace", ch) if err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go index 61b3653e57c..f61bd3bb26b 100644 --- a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroupv2 diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go index 921927de4cd..61327fd2485 100644 --- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go +++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package chown @@ -40,7 +41,6 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error { return nil }) - if err != nil 
{ return errors.Wrap(err, "failed to chown recursively host path") } diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index d5be77edd33..a86eca88efd 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -2,6 +2,7 @@ package config import ( "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -10,6 +11,7 @@ import ( "sync" "github.com/BurntSushi/toml" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" @@ -48,6 +50,18 @@ const ( BoltDBStateStore RuntimeStateStore = iota ) +// ProxyEnv is a list of Proxy Environment variables +var ProxyEnv = []string{ + "http_proxy", + "https_proxy", + "ftp_proxy", + "no_proxy", + "HTTP_PROXY", + "HTTPS_PROXY", + "FTP_PROXY", + "NO_PROXY", +} + // Config contains configuration options for container tools type Config struct { // Containers specify settings that configure how containers will run ont the system @@ -60,6 +74,8 @@ type Config struct { Network NetworkConfig `toml:"network"` // Secret section defines configurations for the secret management Secrets SecretConfig `toml:"secrets"` + // ConfigMap section defines configurations for the configmaps management + ConfigMaps ConfigMapConfig `toml:"configmaps"` } // ContainersConfig represents the "containers" TOML config table @@ -79,6 +95,13 @@ type ContainersConfig struct { // Annotation to add to all containers Annotations []string `toml:"annotations,omitempty"` + // BaseHostsFile is the path to a hosts file, the entries from this file + // are added to the containers hosts file. As special value "image" is + // allowed which uses the /etc/hosts file from within the image and "none" + // which uses no base file at all. If it is empty we should default + // to /etc/hosts. + BaseHostsFile string `toml:"base_hosts_file,omitempty"` + // Default way to create a cgroup namespace for the container CgroupNS string `toml:"cgroupns,omitempty"` @@ -120,6 +143,9 @@ type ContainersConfig struct { // EnvHost Pass all host environment variables into the container. EnvHost bool `toml:"env_host,omitempty"` + // HostContainersInternalIP is used to set a specific host.containers.internal ip. + HostContainersInternalIP string `toml:"host_containers_internal_ip,omitempty"` + // HTTPProxy is the proxy environment variable list to apply to container process HTTPProxy bool `toml:"http_proxy,omitempty"` @@ -167,11 +193,6 @@ type ContainersConfig struct { // performance implications. PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"` - // RootlessNetworking depicts the "kind" of networking for rootless - // containers. Valid options are `slirp4netns` and `cni`. Default is - // `slirp4netns` on Linux, and `cni` on non-Linux OSes. - RootlessNetworking string `toml:"rootless_networking,omitempty"` - // SeccompProfile is the seccomp.json profile path which is used as the // default for the runtime. SeccompProfile string `toml:"seccomp_profile,omitempty"` @@ -215,6 +236,12 @@ type EngineConfig struct { // The first path pointing to a valid file will be used. ConmonPath []string `toml:"conmon_path,omitempty"` + // CompatAPIEnforceDockerHub enforces using docker.io for completing + // short names in Podman's compatibility REST API. 
Note that this will
+	// ignore unqualified-search-registries and short-name aliases defined
+	// in containers-registries.conf(5).
+	CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"`
+
 	// DetachKeys is the sequence of keys used to detach a container.
 	DetachKeys string `toml:"detach_keys,omitempty"`
@@ -233,6 +260,10 @@ type EngineConfig struct {
 	// EventsLogFilePath is where the events log is stored.
 	EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
 
+	// EventsLogFileMaxSize sets the maximum size for the events log. When the limit is exceeded,
+	// the logfile is rotated and the old one is deleted.
+	EventsLogFileMaxSize eventsLogMaxSize `toml:"events_logfile_max_size,omitzero"`
+
 	// EventsLogger determines where events should be logged.
 	EventsLogger string `toml:"events_logger,omitempty"`
@@ -281,6 +312,8 @@ type EngineConfig struct {
 	LockType string `toml:"lock_type,omitempty"`
 
 	// MachineEnabled indicates if Podman is running in a podman-machine VM
+	//
+	// This method is soft deprecated, use machine.IsPodmanMachine instead
 	MachineEnabled bool `toml:"machine_enabled,omitempty"`
 
 	// MultiImageArchive - if true, the container engine allows for storing
@@ -316,6 +349,9 @@ type EngineConfig struct {
 	// OCIRuntimes are the set of configured OCI runtimes (default is runc).
 	OCIRuntimes map[string][]string `toml:"runtimes,omitempty"`
 
+	// PodExitPolicy determines the behaviour when the last container of a pod exits.
+	PodExitPolicy PodExitPolicy `toml:"pod_exit_policy,omitempty"`
+
 	// PullPolicy determines whether to pull image before creating or running a container
 	// default is "missing"
 	PullPolicy string `toml:"pull_policy,omitempty"`
@@ -390,6 +426,10 @@ type EngineConfig struct {
 	// before sending kill signal.
 	StopTimeout uint `toml:"stop_timeout,omitempty,omitzero"`
 
+	// ExitCommandDelay is the number of seconds to wait for the exit
+	// command to be sent to the API process on the server.
+	ExitCommandDelay uint `toml:"exit_command_delay,omitempty,omitzero"`
+
 	// ImageCopyTmpDir is the default location for storing temporary
 	// container image content, Can be overridden with the TMPDIR
 	// environment variable. If you specify "storage", then the
@@ -414,6 +454,9 @@ type EngineConfig struct {
 	// ChownCopiedFiles tells the container engine whether to chown files copied
 	// into a container to the container's primary uid/gid.
 	ChownCopiedFiles bool `toml:"chown_copied_files,omitempty"`
+
+	// CompressionFormat is the compression format used to compress image layers.
+	CompressionFormat string `toml:"compression_format,omitempty"`
 }
 
 // SetOptions contains a subset of options in a Config. It's used to indicate if
@@ -461,23 +504,43 @@ type SetOptions struct {
 // NetworkConfig represents the "network" TOML config table
 type NetworkConfig struct {
+	// NetworkBackend determines what backend should be used for Podman's
+	// networking.
+	NetworkBackend string `toml:"network_backend,omitempty"`
+
 	// CNIPluginDirs is where CNI plugin binaries are stored.
 	CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"`
 
-	// DefaultNetwork is the network name of the default CNI network
+	// DefaultNetwork is the network name of the default network
 	// to attach pods to.
 	DefaultNetwork string `toml:"default_network,omitempty"`
 
-	// DefaultSubnet is the subnet to be used for the default CNI network.
+	// DefaultSubnet is the subnet to be used for the default network.
 	// If a network with the name given in DefaultNetwork is not present
 	// then a new network using this subnet will be created.
 	// Must be a valid IPv4 CIDR block.
 	DefaultSubnet string `toml:"default_subnet,omitempty"`
 
-	// NetworkConfigDir is where CNI network configuration files are stored.
+	// DefaultSubnetPools is a list of subnets and size which are used to
+	// allocate subnets automatically for podman network create.
+	// It will iterate through the list and will pick the first free subnet
+	// with the given size. This is only used for ipv4 subnets, ipv6 subnets
+	// are always assigned randomly.
+	DefaultSubnetPools []SubnetPool `toml:"default_subnet_pools,omitempty"`
+
+	// NetworkConfigDir is where network configuration files are stored.
 	NetworkConfigDir string `toml:"network_config_dir,omitempty"`
 }
 
+type SubnetPool struct {
+	// Base is a bigger subnet which will be used to allocate a subnet with
+	// the given size.
+	Base *types.IPNet `toml:"base,omitempty"`
+	// Size is the CIDR for the new subnet. It must be equal to or smaller
+	// than the CIDR from the base subnet.
+	Size int `toml:"size,omitempty"`
+}
+
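// A minimal illustrative sketch (not part of this patch) of the arithmetic
// the SubnetPool type above implies: a pool with Base 10.89.0.0/16 and
// Size 24 subdivides into 2^(24-16) = 256 candidate /24 networks. The real
// allocator in containers/common additionally skips subnets that are already
// in use; everything below is assumed for the example and uses only the
// standard library.
package main

import (
	"fmt"
	"net"
)

func main() {
	_, base, _ := net.ParseCIDR("10.89.0.0/16")
	size := 24
	ones, bits := base.Mask.Size() // 16, 32 for an IPv4 /16
	count := 1 << (size - ones)    // 256 candidate /24 subnets
	fmt.Printf("%s holds %d /%d subnets\n", base, count, size)

	// The first candidate is the base address re-masked to the pool size.
	first := &net.IPNet{IP: base.IP, Mask: net.CIDRMask(size, bits)}
	fmt.Println("first candidate:", first)
}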
 // SecretConfig represents the "secret" TOML config table
 type SecretConfig struct {
 	// Driver specifies the secret driver to use.
@@ -489,6 +552,17 @@ type SecretConfig struct {
 	Opts map[string]string `toml:"opts,omitempty"`
 }
 
+// ConfigMapConfig represents the "configmap" TOML config table
+type ConfigMapConfig struct {
+	// Driver specifies the configmap driver to use.
+	// Current valid values:
+	//  * file
+	//  * pass
+	Driver string `toml:"driver,omitempty"`
+	// Opts contains driver specific options
+	Opts map[string]string `toml:"opts,omitempty"`
+}
+
 // MachineConfig represents the "machine" TOML config table
 type MachineConfig struct {
 	// Number of CPU's a machine is created with.
@@ -499,6 +573,10 @@ type MachineConfig struct {
 	Image string `toml:"image,omitempty"`
 	// Memory in MB a machine is created with.
 	Memory uint64 `toml:"memory,omitempty,omitzero"`
+	// User to use for rootless podman when init-ing a podman machine VM
+	User string `toml:"user,omitempty"`
+	// Volumes are host directories mounted into the VM by default.
+	Volumes []string `toml:"volumes"`
 }
 
 // Destination represents destination for remote service
@@ -518,7 +596,6 @@ type Destination struct {
 // with cgroupv2v2. Other OCI runtimes are not yet supporting cgroupv2v2. This
 // might change in the future.
func NewConfig(userConfigPath string) (*Config, error) { - // Generate the default config for the system config, err := DefaultConfig() if err != nil { @@ -559,6 +636,10 @@ func NewConfig(userConfigPath string) (*Config, error) { return nil, err } + if err := config.setupEnv(); err != nil { + return nil, err + } + return config, nil } @@ -574,7 +655,7 @@ func readConfigFromFile(path string, config *Config) error { } keys := meta.Undecoded() if len(keys) > 0 { - logrus.Warningf("Failed to decode the keys %q from %q.", keys, path) + logrus.Debugf("Failed to decode the keys %q from %q.", keys, path) } return nil @@ -585,17 +666,14 @@ func readConfigFromFile(path string, config *Config) error { func addConfigs(dirPath string, configs []string) ([]string, error) { newConfigs := []string{} - err := filepath.Walk(dirPath, + err := filepath.WalkDir(dirPath, // WalkFunc to read additional configs - func(path string, info os.FileInfo, err error) error { + func(path string, d fs.DirEntry, err error) error { switch { case err != nil: // return error (could be a permission problem) return err - case info == nil: - // this should only happen when err != nil but let's be sure - return nil - case info.IsDir(): + case d.IsDir(): if path != dirPath { // make sure to not recurse into sub-directories return filepath.SkipDir @@ -701,7 +779,6 @@ func (c *Config) addCAPPrefix() { // Validate is the main entry point for library configuration validation. func (c *Config) Validate() error { - if err := c.Containers.Validate(); err != nil { return errors.Wrap(err, "validating containers config") } @@ -758,7 +835,6 @@ func (c *EngineConfig) Validate() error { // It returns an `error` on validation failure, otherwise // `nil`. func (c *ContainersConfig) Validate() error { - if err := c.validateUlimits(); err != nil { return err } @@ -791,18 +867,18 @@ func (c *ContainersConfig) Validate() error { // execution checks. It returns an `error` on validation failure, otherwise // `nil`. func (c *NetworkConfig) Validate() error { - expectedConfigDir := _cniConfigDir - if unshare.IsRootless() { - home, err := unshare.HomeDir() - if err != nil { - return err - } - expectedConfigDir = filepath.Join(home, _cniConfigDirRootless) - } - if c.NetworkConfigDir != expectedConfigDir { - err := isDirectory(c.NetworkConfigDir) - if err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "invalid network_config_dir: %s", c.NetworkConfigDir) + if &c.DefaultSubnetPools != &DefaultSubnetPools { + for _, pool := range c.DefaultSubnetPools { + if pool.Base.IP.To4() == nil { + return errors.Errorf("invalid subnet pool ip %q", pool.Base.IP) + } + ones, _ := pool.Base.IPNet.Mask.Size() + if ones > pool.Size { + return errors.Errorf("invalid subnet pool, size is bigger than subnet %q", &pool.Base.IPNet) + } + if pool.Size > 32 { + return errors.New("invalid subnet pool size, must be between 0-32") + } } } @@ -878,8 +954,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { if envHost { env = append(env, os.Environ()...) 
} else if httpProxy { - proxy := []string{"http_proxy", "https_proxy", "ftp_proxy", "no_proxy", "HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY"} - for _, p := range proxy { + for _, p := range ProxyEnv { if val, ok := os.LookupEnv(p); ok { env = append(env, fmt.Sprintf("%s=%s", p, val)) } @@ -891,7 +966,6 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { // Capabilities returns the capabilities parses the Add and Drop capability // list from the default capabiltiies for the container func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) { - userNotRoot := func(user string) bool { if user == "" || user == "root" || user == "0" { return false @@ -951,7 +1025,7 @@ func Device(device string) (src, dst, permissions string, err error) { // IsValidDeviceMode checks if the mode for device is valid or not. // IsValid mode is a composition of r (read), w (write), and m (mknod). func IsValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ + legalDeviceMode := map[rune]bool{ 'r': true, 'w': true, 'm': true, @@ -1002,7 +1076,6 @@ func rootlessConfigPath() (string, error) { } func stringsEq(a, b []string) bool { - if len(a) != len(b) { return false } @@ -1087,10 +1160,10 @@ func (c *Config) Write() error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { return err } - configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) + configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644) if err != nil { return err } @@ -1142,7 +1215,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) { // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { - for _, path := range c.Engine.HelperBinariesDir { + dir_list := c.Engine.HelperBinariesDir + + // If set, search this directory first. This is used in testing. + if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { + dir_list = append([]string{dir}, dir_list...) + } + + for _, path := range dir_list { fullpath := filepath.Join(path, name) if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { return fullpath, nil @@ -1151,13 +1231,14 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) if searchPATH { return exec.LookPath(name) } + configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." if len(c.Engine.HelperBinariesDir) == 0 { - return "", errors.Errorf("could not find %q because there are no helper binary directories configured", name) + return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } - return "", errors.Errorf("could not find %q in one of %v", name, c.Engine.HelperBinariesDir) + return "", errors.Errorf("could not find %q in one of %v. 
%s", name, c.Engine.HelperBinariesDir, configHint) } -// ImageCopyTmpDir default directory to store tempory image files during copy +// ImageCopyTmpDir default directory to store temporary image files during copy func (c *Config) ImageCopyTmpDir() (string, error) { if path, found := os.LookupEnv("TMPDIR"); found { return path, nil @@ -1175,3 +1256,53 @@ func (c *Config) ImageCopyTmpDir() (string, error) { return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) } + +// setupEnv sets the environment variables for the engine +func (c *Config) setupEnv() error { + for _, env := range c.Engine.Env { + splitEnv := strings.SplitN(env, "=", 2) + if len(splitEnv) != 2 { + logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) + continue + } + // skip if the env is already defined + if _, ok := os.LookupEnv(splitEnv[0]); ok { + logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0]) + continue + } + if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil { + return err + } + } + return nil +} + +// eventsLogMaxSize is the type used by EventsLogFileMaxSize +type eventsLogMaxSize uint64 + +// UnmarshalText parses the JSON encoding of eventsLogMaxSize and +// stores it in a value. +func (e *eventsLogMaxSize) UnmarshalText(text []byte) error { + // REMOVE once writing works + if string(text) == "" { + return nil + } + val, err := units.FromHumanSize((string(text))) + if err != nil { + return err + } + if val < 0 { + return fmt.Errorf("events log file max size cannot be negative: %s", string(text)) + } + *e = eventsLogMaxSize(uint64(val)) + return nil +} + +// MarshalText returns the JSON encoding of eventsLogMaxSize. +func (e eventsLogMaxSize) MarshalText() ([]byte, error) { + if uint64(e) == DefaultEventsLogSizeMax || e == 0 { + v := []byte{} + return v, nil + } + return []byte(fmt.Sprintf("%d", e)), nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go new file mode 100644 index 00000000000..85404a48ddf --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -0,0 +1,25 @@ +package config + +import ( + "os" +) + +// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. 
+func customConfigFile() (string, error) {
+	if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
+		return path, nil
+	}
+	return rootlessConfigPath()
+}
+
+func ifRootlessConfigPath() (string, error) {
+	return rootlessConfigPath()
+}
+
+var defaultHelperBinariesDir = []string{
+	"/usr/local/bin",
+	"/usr/local/libexec/podman",
+	"/usr/local/lib/podman",
+	"/usr/local/libexec/podman",
+	"/usr/local/lib/podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
index 21dab043f87..bfb9675824b 100644
--- a/vendor/github.com/containers/common/pkg/config/config_local.go
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -1,3 +1,4 @@
+//go:build !remote
 // +build !remote
 
 package config
diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go
index 7fd9202bbfc..bff869efa96 100644
--- a/vendor/github.com/containers/common/pkg/config/config_remote.go
+++ b/vendor/github.com/containers/common/pkg/config/config_remote.go
@@ -1,3 +1,4 @@
+//go:build remote
 // +build remote
 
 package config
diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
index 6563fd31743..64e4fcfcdf5 100644
--- a/vendor/github.com/containers/common/pkg/config/config_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package config
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 1d3c003e304..a4e755a665b 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -26,6 +26,13 @@
 #
 #apparmor_profile = "container-default"
 
+# The hosts entries from the base hosts file are added to the containers hosts
+# file. This must be either an absolute path or one of the special values "image",
+# which uses the hosts file from the container image, or "none", which means
+# no base hosts file is used. The default is "" which will use /etc/hosts.
+#
+#base_hosts_file = ""
+
 # Default way to to create a cgroup namespace for the container
 # Options are:
 # `private` Create private Cgroup Namespace for the container.
@@ -114,6 +121,16 @@ default_sysctls = [
 #
 #env_host = false
 
+# Set the ip for the host.containers.internal entry in the containers /etc/hosts
+# file. This can be set to "none" to disable adding this entry. By default it
+# will automatically choose the host ip.
+#
+# NOTE: When using podman machine this entry will never be added to the containers
+# hosts file; instead, the gvproxy dns resolver will resolve this hostname. Therefore
+# it is not possible to disable the entry in this case.
+#
+#host_containers_internal_ip = ""
+
 # Default proxy environment variables passed into the container.
 # The environment variables passed in include:
 # http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
@@ -133,10 +150,12 @@ default_sysctls = [
 # Default way to to create an IPC namespace (POSIX SysV IPC) for the container
 # Options are:
-# `private` Create private IPC Namespace for the container.
-# `host`  Share host IPC Namespace with the container.
+# "host"      Share host IPC Namespace with the container.
+# "none"      Create shareable IPC Namespace for the container without a private /dev/shm.
+# "private"   Create private IPC Namespace for the container, other containers are not allowed to share it.
+# "shareable" Create shareable IPC Namespace for the container.
 #
-#ipcns = "private"
+#ipcns = "shareable"
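// An illustrative sketch (not part of this patch) of what the http_proxy
// option above amounts to on the engine side: when it is enabled, only the
// well-known proxy variables are copied from the host environment into the
// container. The list mirrors the ProxyEnv slice added to pkg/config in this
// change; the rest of the snippet is assumed for the example.
package main

import (
	"fmt"
	"os"
)

var proxyEnv = []string{
	"http_proxy", "https_proxy", "ftp_proxy", "no_proxy",
	"HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY",
}

func main() {
	env := []string{}
	for _, p := range proxyEnv {
		if val, ok := os.LookupEnv(p); ok {
			env = append(env, fmt.Sprintf("%s=%s", p, val))
		}
	}
	fmt.Println("proxy environment passed to the container:", env)
}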
 # keyring tells the container engine whether to create
 # a kernel keyring for use within the container.
@@ -197,10 +216,6 @@ default_sysctls = [
 #
 #prepare_volume_on_create = false
 
-# Indicates the networking to be used for rootless containers
-#
-#rootless_networking = "slirp4netns"
-
 # Path to the seccomp.json profile which is used as the default seccomp profile
 # for the runtime.
 #
@@ -249,9 +264,6 @@ default_sysctls = [
 #
 #volumes = []
 
-# The network table contains settings pertaining to the management of
-# CNI plugins.
-
 [secrets]
 #driver = "file"
 
@@ -260,6 +272,16 @@ default_sysctls = [
 
 [network]
 
+# Network backend determines what network driver will be used to set up and tear down container networks.
+# Valid values are "cni" and "netavark".
+# The default value is empty which means that it will automatically choose CNI or netavark. If there are
+# already containers/images or CNI networks present it will choose CNI.
+#
+# Before changing this value all containers must be stopped otherwise it is likely that
+# iptables rules and network interfaces might leak on the host. A reboot will fix this.
+#
+#network_backend = ""
+
 # Path to directory where CNI plugin binaries are located.
 #
 #cni_plugin_dirs = [
@@ -270,18 +292,36 @@ default_sysctls = [
 #  "/opt/cni/bin",
 #]
 
-# The network name of the default CNI network to attach pods to.
+# The network name of the default network to attach pods to.
 #
 #default_network = "podman"
 
-# The default subnet for the default CNI network given in default_network.
+# The default subnet for the default network given in default_network.
 # If a network with that name does not exist, a new network using that name and
 # this subnet will be created.
 # Must be a valid IPv4 CIDR prefix.
 #
 #default_subnet = "10.88.0.0/16"
 
-# Path to the directory where CNI configuration files are located.
+# DefaultSubnetPools is a list of subnets and size which are used to
+# allocate subnets automatically for podman network create.
+# It will iterate through the list and will pick the first free subnet
+# with the given size. This is only used for ipv4 subnets, ipv6 subnets
+# are always assigned randomly.
+#
+#default_subnet_pools = [
+#  {"base" = "10.89.0.0/16", "size" = 24},
+#  {"base" = "10.90.0.0/15", "size" = 24},
+#  {"base" = "10.92.0.0/14", "size" = 24},
+#  {"base" = "10.96.0.0/11", "size" = 24},
+#  {"base" = "10.128.0.0/9", "size" = 24},
+#]
+
+# Path to the directory where network configuration files are located.
+# For the CNI backend the default is "/etc/cni/net.d" as root
+# and "$HOME/.config/cni/net.d" as rootless.
+# For the netavark backend "/etc/containers/networks" is used as root
+# and "$graphroot/networks" as rootless.
 #
 #network_config_dir = "/etc/cni/net.d/"
@@ -290,6 +330,12 @@ default_sysctls = [
 #
 #active_service = production
 
+# The compression format to use when pushing an image.
+# Valid options are: `gzip`, `zstd` and `zstd:chunked`.
+#
+#compression_format = "gzip"
+
+
 # Cgroup management implementation used for the runtime.
 # Valid options "systemd" or "cgroupfs"
 #
@@ -313,6 +359,11 @@ default_sysctls = [
 #  "/usr/local/sbin/conmon"
 #]
 
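// A hedged sketch (not part of this patch) of how an engine can validate one
// of the conmon_path candidates above, in the spirit of the vendored
// probeConmon function shown later in this diff: run `conmon --version` and
// extract the semantic version with a named-group regex. The snippet assumes
// a conmon binary is on $PATH and prints a diagnostic otherwise.
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"regexp"
)

func main() {
	cmd := exec.Command("conmon", "--version")
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	r := regexp.MustCompile(`conmon version (?P<Major>\d+)\.(?P<Minor>\d+)\.(?P<Patch>\d+)`)
	matches := r.FindStringSubmatch(out.String())
	if len(matches) != 4 {
		fmt.Println("unexpected version output:", out.String())
		return
	}
	fmt.Printf("found conmon %s.%s.%s\n", matches[1], matches[2], matches[3])
}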
+# Enforces using docker.io for completing short names in Podman's compatibility
+# REST API. Note that this will ignore unqualified-search-registries and
+# short-name aliases defined in containers-registries.conf(5).
+#compat_api_enforce_docker_hub = true
+
 # Specify the keys sequence used to detach a container.
 # Format is a single character [a-Z] or a comma separated sequence of
 # `ctrl-<value>`, where `<value>` is one of:
@@ -336,6 +387,18 @@ default_sysctls = [
 #
 #env = []
 
+# Define where event logs will be stored, when events_logger is "file".
+#events_logfile_path=""
+
+# Sets the maximum size for events_logfile_path.
+# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
+# The format for the size is `<number><unit>`, e.g., `1b` or `3g`.
+# If no unit is included then the size will be read in bytes.
+# When the limit is exceeded, the logfile will be rotated and the old one will be deleted.
+# If the maximum size is set to 0, then no limit will be applied,
+# and the logfile will not be rotated.
+#events_logfile_max_size = "1m"
+
 # Selects which logging mechanism to use for container engine events.
 # Valid values are `journald`, `file` and `none`.
 #
@@ -378,24 +441,20 @@ default_sysctls = [
 # Infra (pause) container image name for pod infra containers.  When running a
 # pod, we start a `pause` process in a container to hold open the namespaces
 # associated with the pod.  This container does nothing other then sleep,
-# reserving the pods resources for the lifetime of the pod.
+# reserving the pods resources for the lifetime of the pod. By default container
+# engines run a builtin container using the pause executable. If you want to
+# override it, specify an image to pull.
 #
-#infra_image = "k8s.gcr.io/pause:3.4.1"
+#infra_image = ""
 
 # Specify the locking mechanism to use; valid values are "shm" and "file".
 # Change the default only if you are sure of what you are doing, in general
 # "file" is useful only on platforms where cgo is not available for using the
-# faster "shm" lock type.  You may need to run "podman system renumber" after
+# faster "shm" lock type. You may need to run "podman system renumber" after
 # you change the lock type.
 #
 #lock_type** = "shm"
 
-# Indicates if Podman is running inside a VM via Podman Machine.
-# Podman uses this value to do extra setup around networking from the
-# container inside the VM to to host.
-#
-#machine_enabled = false
-
 # MultiImageArchive - if true, the container engine allows for storing archives
 # (e.g., of the docker-archive transport) with multiple images. By default,
 # Podman creates single-image archives.
@@ -416,9 +475,26 @@ default_sysctls = [
 #network_cmd_path = ""
 
 # Default options to pass to the slirp4netns binary.
-# For example "allow_host_loopback=true"
-#
-#network_cmd_options = ["enable_ipv6=true",]
+# Valid options values are:
+#
+#   - allow_host_loopback=true|false: Allow the slirp4netns to reach the host loopback IP (`10.0.2.2`).
+#     Default is false.
+#   - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`).
+#   - cidr=CIDR: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
+#   - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`).
+#   - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (ipv4 traffic only).
+#   - outbound_addr=IPv4: Specify the outbound ipv4 address slirp should bind to.
+#   - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (ipv6 traffic only).
+#   - outbound_addr6=IPv6: Specify the outbound ipv6 address slirp should bind to.
+#   - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default.
+#     Note: Rootlesskit changes the source IP address of incoming packets to an IP address in the container
+#     network namespace, usually `10.0.2.100`. If your application requires the real source IP address,
+#     e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for
+#     rootless containers when connected to user-defined networks.
+#   - port_handler=slirp4netns: Use the slirp4netns port forwarding, it is slower than rootlesskit but
+#     preserves the correct source IP address. This port handler cannot be used for user-defined networks.
+#
+#network_cmd_options = []
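// A small illustrative parser (not part of this patch) for
// network_cmd_options entries such as "allow_host_loopback=true" or
// "mtu=65520". The option names come from the list above; the splitting
// logic is an assumption for the example, not the engine's actual code.
package main

import (
	"fmt"
	"strings"
)

func main() {
	opts := []string{"allow_host_loopback=true", "mtu=65520", "port_handler=slirp4netns"}
	for _, o := range opts {
		kv := strings.SplitN(o, "=", 2)
		if len(kv) != 2 {
			fmt.Println("malformed option:", o)
			continue
		}
		fmt.Printf("option %q -> %q\n", kv[0], kv[1])
	}
}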
 # Whether to use chroot instead of pivot_root in the runtime
 #
@@ -430,6 +506,9 @@ default_sysctls = [
 #
 #num_locks = 2048
 
+# Set the exit policy of the pod when the last container exits.
+#pod_exit_policy = "continue"
+
 # Whether to pull new image before running a container
 #
 #pull_policy = "missing"
@@ -444,7 +523,7 @@ default_sysctls = [
 #
 #runtime = "crun"
 
-# List of the OCI runtimes that support --format=json.  When json is supported
+# List of the OCI runtimes that support --format=json. When json is supported
 # engine will use it for reporting nicer errors.
 #
 #runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"]
@@ -457,8 +536,8 @@ default_sysctls = [
 #
 #runtime_supports_nocgroups = ["crun", "krun"]
 
-# Default location for storing temporary container image content.  Can be overridden with the TMPDIR environment
-# variable.  If you specify "storage", then the location of the
+# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment
+# variable. If you specify "storage", then the location of the
 # container/storage tmp directory will be used.
 # image_copy_tmp_dir="/var/tmp"
@@ -478,6 +557,11 @@ default_sysctls = [
 #
 #stop_timeout = 10
 
+# Number of seconds to wait for the exit command to be sent to the API process on the server.
+# This mimics Docker's exec cleanup behaviour, where the default is 5 minutes (value is in seconds).
+#
+#exit_command_delay = 300
+
 # map of service destinations
 #
 #[service_destinations]
@@ -485,9 +569,9 @@ default_sysctls = [
 #  URI to access the Podman service
 #  Examples:
 #    rootless "unix://run/user/$UID/podman/podman.sock" (Default)
-#    rootfull "unix://run/podman/podman.sock (Default)
+#    rootful "unix://run/podman/podman.sock (Default)
 #    remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
-#    remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock
+#    remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock
 #
 # uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
 # Path to file containing ssh identity key
@@ -572,6 +656,20 @@ default_sysctls = [
 #
 #memory=2048
 
+# The username to use and create on the podman machine OS for rootless
+# container access.
+#
+#user = "core"
+
+# Host directories to be mounted as volumes into the VM by default.
+# Environment variables like $HOME as well as complete paths are supported for
+# the source and destination. An optional third field `:ro` can be used to
+# tell the container engines to mount the volume readonly.
+#
+# volumes = [
+#  "$HOME:$HOME",
+#]
+
 # The [machine] table MUST be the last entry in this file.
 # (Unless another table is added)
 # TOML does not provide a way to end a table other than a further table being
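// A minimal sketch (not part of this patch) of the validation that the
// machine volumes option above receives: it mirrors the shape of the
// vendored machineVolumes helper that appears later in this diff, under the
// assumption that environment variables are expanded first and each entry
// must have two or three non-empty colon-separated fields.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	for _, v := range []string{"$HOME:$HOME", "/src:/dst:ro", "bad"} {
		vol := os.ExpandEnv(v)
		fields := strings.Split(vol, ":")
		if len(fields) < 2 || len(fields) > 3 || fields[0] == "" || fields[1] == "" {
			fmt.Printf("invalid machine volume %q\n", v)
			continue
		}
		fmt.Printf("valid machine volume %q\n", vol)
	}
}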
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index e72e1b3e447..8979a406bc7 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -3,14 +3,18 @@ package config
 import (
 	"bytes"
 	"fmt"
+	"net"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"regexp"
 	"strconv"
+	"strings"
 
+	nettypes "github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/apparmor"
 	"github.com/containers/common/pkg/cgroupv2"
+	"github.com/containers/common/pkg/util"
 	"github.com/containers/storage/pkg/homedir"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/containers/storage/types"
@@ -45,7 +49,7 @@ var (
 	// DefaultInitPath is the default path to the container-init binary
 	DefaultInitPath = "/usr/libexec/podman/catatonit"
 	// DefaultInfraImage to use for infra container
-	DefaultInfraImage = "k8s.gcr.io/pause:3.5"
+	DefaultInfraImage = ""
 	// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
 	DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
 	// DefaultDetachKeys is the default keys sequence for detaching a
@@ -84,8 +88,29 @@ var (
 		"/usr/lib/cni",
 		"/opt/cni/bin",
 	}
+	DefaultSubnetPools = []SubnetPool{
+		// 10.89.0.0/24-10.255.255.0/24
+		parseSubnetPool("10.89.0.0/16", 24),
+		parseSubnetPool("10.90.0.0/15", 24),
+		parseSubnetPool("10.92.0.0/14", 24),
+		parseSubnetPool("10.96.0.0/11", 24),
+		parseSubnetPool("10.128.0.0/9", 24),
+	}
+	// additionalHelperBinariesDir is an extra helper binaries directory that
+	// should be set during link-time, if different packagers put their
+	// helper binary in a different location
+	additionalHelperBinariesDir string
 )
 
+// nolint:unparam
+func parseSubnetPool(subnet string, size int) SubnetPool {
+	_, n, _ := net.ParseCIDR(subnet)
+	return SubnetPool{
+		Base: &nettypes.IPNet{IPNet: *n},
+		Size: size,
+	}
+}
+
 const (
 	// _etcDir is the sysconfdir where podman should look for system config files.
 	// It can be overridden at build time.
@@ -93,19 +118,20 @@ const (
 	// InstallPrefix is the prefix where podman will be installed.
 	// It can be overridden at build time.
 	_installPrefix = "/usr"
-	// _cniConfigDir is the directory where cni configuration is found
-	_cniConfigDir = "/etc/cni/net.d/"
-	// _cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins
-	_cniConfigDirRootless = "cni/net.d/"
 	// CgroupfsCgroupsManager represents cgroupfs native cgroup manager
 	CgroupfsCgroupsManager = "cgroupfs"
 	// DefaultApparmorProfile specifies the default apparmor profile for the container.
 	DefaultApparmorProfile = apparmor.Profile
+	// DefaultHostsFile is the default path to the hosts file
+	DefaultHostsFile = "/etc/hosts"
 	// SystemdCgroupsManager represents systemd native cgroup manager
 	SystemdCgroupsManager = "systemd"
 	// DefaultLogSizeMax is the default value for the maximum log size
 	// allowed for a container. Negative values mean that no limit is imposed.
 	DefaultLogSizeMax = -1
+	// DefaultEventsLogSizeMax is the default value for the maximum events log size
+	// before rotation.
+	DefaultEventsLogSizeMax = uint64(1000000)
 	// DefaultPidsLimit is the default value for maximum number of processes
 	// allowed inside a container
 	DefaultPidsLimit = 2048
@@ -114,7 +140,7 @@ const (
 	// DefaultSignaturePolicyPath is the default value for the
 	// policy.json file.
DefaultSignaturePolicyPath = "/etc/containers/policy.json" - // DefaultSubnet is the subnet that will be used for the default CNI + // DefaultSubnet is the subnet that will be used for the default // network. DefaultSubnet = "10.88.0.0/16" // DefaultRootlessSignaturePolicyPath is the location within @@ -134,14 +160,11 @@ const ( // DefaultConfig defines the default values from containers.conf func DefaultConfig() (*Config, error) { - defaultEngineConfig, err := defaultConfigFromMemory() if err != nil { return nil, err } - cniConfig := _cniConfigDir - defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath if unshare.IsRootless() { configHome, err := homedir.GetConfigHome() @@ -155,7 +178,6 @@ func DefaultConfig() (*Config, error) { defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath } } - cniConfig = filepath.Join(configHome, _cniConfigDirRootless) } cgroupNS := "host" @@ -169,6 +191,7 @@ func DefaultConfig() (*Config, error) { Volumes: []string{}, Annotations: []string{}, ApparmorProfile: DefaultApparmorProfile, + BaseHostsFile: "", CgroupNS: cgroupNS, Cgroups: "enabled", DefaultCapabilities: DefaultCapabilities, @@ -183,28 +206,28 @@ func DefaultConfig() (*Config, error) { "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", }, - EnvHost: false, - HTTPProxy: true, - Init: false, - InitPath: "", - IPCNS: "private", - LogDriver: defaultLogDriver(), - LogSizeMax: DefaultLogSizeMax, - NoHosts: false, - PidsLimit: DefaultPidsLimit, - PidNS: "private", - RootlessNetworking: getDefaultRootlessNetwork(), - ShmSize: DefaultShmSize, - TZ: "", - Umask: "0022", - UTSNS: "private", - UserNSSize: DefaultUserNSSize, + EnvHost: false, + HTTPProxy: true, + Init: false, + InitPath: "", + IPCNS: "shareable", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + NetNS: "private", + NoHosts: false, + PidsLimit: DefaultPidsLimit, + PidNS: "private", + ShmSize: DefaultShmSize, + TZ: "", + Umask: "0022", + UTSNS: "private", + UserNSSize: DefaultUserNSSize, }, Network: NetworkConfig{ - DefaultNetwork: "podman", - DefaultSubnet: DefaultSubnet, - NetworkConfigDir: cniConfig, - CNIPluginDirs: DefaultCNIPluginDirs, + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + DefaultSubnetPools: DefaultSubnetPools, + CNIPluginDirs: DefaultCNIPluginDirs, }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -224,9 +247,11 @@ func defaultSecretConfig() SecretConfig { func defaultMachineConfig() MachineConfig { return MachineConfig{ CPUs: 1, - DiskSize: 10, - Image: "testing", + DiskSize: 100, + Image: getDefaultMachineImage(), Memory: 2048, + User: getDefaultMachineUser(), + Volumes: []string{"$HOME:$HOME"}, } } @@ -242,6 +267,10 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax) + + c.CompatAPIEnforceDockerHub = true + if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { types.SetDefaultConfigFilePath(path) } @@ -255,11 +284,14 @@ func defaultConfigFromMemory() (*EngineConfig, error) { storeOpts.GraphRoot = _defaultGraphRoot } c.graphRoot = storeOpts.GraphRoot - c.ImageCopyTmpDir = "/var/tmp" + c.ImageCopyTmpDir = getDefaultTmpDir() c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod") c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes") c.HelperBinariesDir = defaultHelperBinariesDir + if additionalHelperBinariesDir != "" { + c.HelperBinariesDir = 
append(c.HelperBinariesDir, additionalHelperBinariesDir)
+	}
 	c.HooksDir = DefaultHooksDirs
 	c.ImageDefaultTransport = _defaultTransport
 	c.StateType = BoltDBStateStore
@@ -269,9 +301,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 	c.CgroupManager = defaultCgroupManager()
 	c.ServiceTimeout = uint(5)
 	c.StopTimeout = uint(10)
-	c.NetworkCmdOptions = []string{
-		"enable_ipv6=true",
-	}
+	c.ExitCommandDelay = uint(5 * 60)
 	c.Remote = isRemote()
 	c.OCIRuntimes = map[string][]string{
 		"crun": {
@@ -358,6 +388,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 	c.MachineEnabled = false
 	c.ChownCopiedFiles = true
 
+	c.PodExitPolicy = defaultPodExitPolicy
+
 	return c, nil
 }
 
@@ -366,16 +398,16 @@ func defaultTmpDir() (string, error) {
 		return "/run/libpod", nil
 	}
 
-	runtimeDir, err := getRuntimeDir()
+	runtimeDir, err := util.GetRuntimeDir()
 	if err != nil {
 		return "", err
 	}
 	libpodRuntimeDir := filepath.Join(runtimeDir, "libpod")
 
-	if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
+	if err := os.Mkdir(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
 		if !os.IsExist(err) {
 			return "", err
-		} else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
+		} else if err := os.Chmod(libpodRuntimeDir, 0o700|os.ModeSticky); err != nil {
 			// The directory already exist, just set the sticky bit
 			return "", errors.Wrap(err, "set sticky bit on")
 		}
@@ -389,15 +421,14 @@ func probeConmon(conmonBinary string) error {
 	cmd := exec.Command(conmonBinary, "--version")
 	var out bytes.Buffer
 	cmd.Stdout = &out
-	err := cmd.Run()
-	if err != nil {
+	if err := cmd.Run(); err != nil {
 		return err
 	}
 	r := regexp.MustCompile(`^conmon version (?P<Major>\d+).(?P<Minor>\d+).(?P<Patch>\d+)`)
 	matches := r.FindStringSubmatch(out.String())
 	if len(matches) != 4 {
-		return errors.Wrap(err, _conmonVersionFormatErr)
+		return errors.New(_conmonVersionFormatErr)
 	}
 	major, err := strconv.Atoi(matches[1])
 	if err != nil {
@@ -440,6 +471,10 @@ func (c *Config) NetNS() string {
 	return c.Containers.NetNS
 }
 
+func (c EngineConfig) EventsLogMaxSize() uint64 {
+	return uint64(c.EventsLogFileMaxSize)
+}
+
 // SecurityOptions returns the default security options
 func (c *Config) SecurityOptions() []string {
 	securityOpts := []string{}
@@ -570,8 +605,23 @@ func (c *Config) MachineEnabled() bool {
 	return c.Engine.MachineEnabled
 }
 
-// RootlessNetworking returns the "kind" of networking
-// rootless containers should use
-func (c *Config) RootlessNetworking() string {
-	return c.Containers.RootlessNetworking
+// MachineVolumes returns volumes to mount into the VM
+func (c *Config) MachineVolumes() ([]string, error) {
+	return machineVolumes(c.Machine.Volumes)
+}
+
+func machineVolumes(volumes []string) ([]string, error) {
+	translatedVolumes := []string{}
+	for _, v := range volumes {
+		vol := os.ExpandEnv(v)
+		split := strings.Split(vol, ":")
+		if len(split) < 2 || len(split) > 3 {
+			return nil, errors.Errorf("invalid machine volume %s, 2 or 3 fields required", v)
+		}
+		if split[0] == "" || split[1] == "" {
+			return nil, errors.Errorf("invalid machine volume %s, fields must contain data", v)
+		}
+		translatedVolumes = append(translatedVolumes, vol)
+	}
+	return translatedVolumes, nil
 }
diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go
index c68c0b130fc..d6ea4359cc3 100644
--- a/vendor/github.com/containers/common/pkg/config/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/default_linux.go
@@ -3,6 +3,7 @@ package config
import ( "fmt" "io/ioutil" + "os" "strconv" "strings" @@ -13,10 +14,15 @@ const ( oldMaxSize = uint64(1048576) ) -// getDefaultRootlessNetwork returns the default rootless network configuration. -// It is "slirp4netns" for Linux. -func getDefaultRootlessNetwork() string { - return "slirp4netns" +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" } // getDefaultProcessLimits returns the nproc for the current process in ulimits format @@ -43,3 +49,12 @@ func getDefaultProcessLimits() []string { } return defaultLimits } + +// getDefaultTmpDir for linux +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index e38fb810de8..4be8267558d 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -1,11 +1,19 @@ -// +build !linux +//go:build !linux && !windows +// +build !linux,!windows package config -// getDefaultRootlessNetwork returns the default rootless network configuration. -// It is "cni" for non-Linux OSes (to better support `podman-machine` usecases). -func getDefaultRootlessNetwork() string { - return "cni" +import "os" + +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" } // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. @@ -17,3 +25,12 @@ func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { func getDefaultProcessLimits() []string { return []string{} } + +// getDefaultTmpDir for linux +func getDefaultTmpDir() string { + // first check the TMPDIR env var + if path, found := os.LookupEnv("TMPDIR"); found { + return path + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go new file mode 100644 index 00000000000..db230dfb286 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -0,0 +1,34 @@ +package config + +import "os" + +// getDefaultImage returns the default machine image stream +// On Windows this refers to the Fedora major release number +func getDefaultMachineImage() string { + return "35" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "user" +} + +// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. 
+func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { + return false, nil +} + +// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format +func getDefaultProcessLimits() []string { + return []string{} +} + +// getDefaultTmpDir for windows +func getDefaultTmpDir() string { + // first check the Temp env var + // https://answers.microsoft.com/en-us/windows/forum/all/where-is-the-temporary-folder/44a039a5-45ba-48dd-84db-fd700e54fd56 + if val, ok := os.LookupEnv("TEMP"); ok { + return val + } + return os.Getenv("LOCALAPPDATA") + "\\Temp" +} diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go index 2a3b6fb35f1..352fddf92cc 100644 --- a/vendor/github.com/containers/common/pkg/config/nosystemd.go +++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go @@ -1,3 +1,4 @@ +//go:build !systemd || !cgo // +build !systemd !cgo package config @@ -22,3 +23,7 @@ func defaultLogDriver() string { func useSystemd() bool { return false } + +func useJournald() bool { + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go new file mode 100644 index 00000000000..f0f983077dd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/pod_exit_policy.go @@ -0,0 +1,36 @@ +package config + +import "fmt" + +// PodExitPolicies includes the supported pod exit policies. +var PodExitPolicies = []string{string(PodExitPolicyContinue), string(PodExitPolicyStop)} + +// PodExitPolicy determines a pod's exit and stop behaviour. +type PodExitPolicy string + +const ( + // PodExitPolicyContinue instructs the pod to continue running when the + // last container has exited. + PodExitPolicyContinue PodExitPolicy = "continue" + // PodExitPolicyStop instructs the pod to stop when the last container + // has exited. + PodExitPolicyStop = "stop" + // PodExitPolicyUnsupported implies an internal error. + // Negative for backwards compat. + PodExitPolicyUnsupported = "invalid" + + defaultPodExitPolicy = PodExitPolicyContinue +) + +// ParsePodExitPolicy parses the specified policy and returns an error if it is +// invalid. 
+func ParsePodExitPolicy(policy string) (PodExitPolicy, error) { + switch policy { + case "", string(PodExitPolicyContinue): + return PodExitPolicyContinue, nil + case string(PodExitPolicyStop): + return PodExitPolicyStop, nil + default: + return PodExitPolicyUnsupported, fmt.Errorf("invalid pod exit policy: %q", policy) + } +} diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index fab3ea437a8..03d19a12f30 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -1,15 +1,16 @@ +//go:build systemd && cgo // +build systemd,cgo package config import ( "io/ioutil" + "path/filepath" "strings" "sync" "github.com/containers/common/pkg/cgroupv2" "github.com/containers/storage/pkg/unshare" - "github.com/coreos/go-systemd/v22/sdjournal" ) var ( @@ -57,7 +58,6 @@ func useSystemd() bool { val := strings.TrimSuffix(string(dat), "\n") usesSystemd = (val == "systemd") } - return }) return usesSystemd } @@ -67,13 +67,20 @@ func useJournald() bool { if !useSystemd() { return } - journal, err := sdjournal.NewJournal() - if err != nil { - return + for _, root := range []string{"/run/log/journal", "/var/log/journal"} { + dirs, err := ioutil.ReadDir(root) + if err != nil { + continue + } + for _, d := range dirs { + if d.IsDir() { + if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil { + usesJournald = true + return + } + } + } } - journal.Close() - usesJournald = true - return }) return usesJournald } diff --git a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go deleted file mode 100644 index 9b32241d65c..00000000000 --- a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go +++ /dev/null @@ -1,222 +0,0 @@ -package defaultnet - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// TODO: A smarter implementation would make sure cni-podman0 was unused before -// making the default, and adjust if necessary -const networkTemplate = `{ - "cniVersion": "0.4.0", - "name": "{{{{.Name}}}}", - "plugins": [ - { - "type": "bridge", - "bridge": "cni-podman0", - "isGateway": true, - "ipMasq": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "routes": [{ "dst": "0.0.0.0/0" }], - "ranges": [ - [ - { - "subnet": "{{{{.Subnet}}}}", - "gateway": "{{{{.Gateway}}}}" - } - ] - ] - } - }, -{{{{- if (eq .Machine true) }}}} - { - "type": "podman-machine", - "capabilities": { - "portMappings": true - } - }, -{{{{- end}}}} - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - }, - { - "type": "firewall" - }, - { - "type": "tuning" - } - ] -} -` - -var ( - // Borrowed from Podman, modified to remove dashes and periods. - nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_]*$") -) - -// Used to pass info into the template engine -type networkInfo struct { - Name string - Subnet string - Gateway string - Machine bool -} - -// The most trivial definition of a CNI network possible for our use here. -// We need the name, and nothing else. -type network struct { - Name string `json:"name"` -} - -// Create makes the CNI default network, if necessary. 
-// Accepts the name and subnet of the network to create (a standard template -// will be used, with these values plugged in), the configuration directory -// where CNI configs are stored (to verify if a named configuration already -// exists), an exists directory (where a sentinel file will be stored, to ensure -// the network is only made once), and an isMachine bool (to determine whether -// the machine block will be added to the config). -// Create first checks if a default network has already been created via the -// presence of a sentinel file. If it does exist, it returns immediately without -// error. -// It next checks if a CNI network with the given name already exists. In that -// case, it creates the sentinel file and returns without error. -// If neither of these are true, the default network is created. -func Create(name, subnet, configDir, existsDir string, isMachine bool) error { - // TODO: Should probably regex name to make sure it's valid. - if name == "" || subnet == "" || configDir == "" || existsDir == "" { - return errors.Errorf("must provide values for all arguments to MakeDefaultNetwork") - } - if !nameRegex.MatchString(name) { - return errors.Errorf("invalid default network name %s - letters, numbers, and underscores only", name) - } - - sentinelFile := filepath.Join(existsDir, "defaultCNINetExists") - - // Check if sentinel file exists, return immediately if it does. - if _, err := os.Stat(sentinelFile); err == nil { - return nil - } - - // Create the sentinel file if it doesn't exist, so subsequent checks - // don't need to go further. - file, err := os.Create(sentinelFile) - if err != nil { - return err - } - file.Close() - - // We may need to make the config dir. - if err := os.MkdirAll(configDir, 0755); err != nil && !os.IsExist(err) { - return errors.Wrapf(err, "error creating CNI configuration directory") - } - - // Check all networks in the CNI conflist. - files, err := ioutil.ReadDir(configDir) - if err != nil { - return errors.Wrapf(err, "error reading CNI configuration directory") - } - if len(files) > 0 { - configPaths := make([]string, 0, len(files)) - for _, path := range files { - if !path.IsDir() && filepath.Ext(path.Name()) == ".conflist" { - configPaths = append(configPaths, filepath.Join(configDir, path.Name())) - } - } - for _, config := range configPaths { - configName, err := getConfigName(config) - if err != nil { - logrus.Errorf("Error reading CNI configuration file: %v", err) - continue - } - if configName == name { - return nil - } - } - } - - // We need to make the config. - // Get subnet and gateway. 
- _, ipNet, err := net.ParseCIDR(subnet) - if err != nil { - return errors.Wrapf(err, "default network subnet %s is invalid", subnet) - } - - ones, bits := ipNet.Mask.Size() - if ones == bits { - return errors.Wrapf(err, "default network subnet %s is to small", subnet) - } - gateway := make(net.IP, len(ipNet.IP)) - // copy the subnet ip to the gateway so we can modify it - copy(gateway, ipNet.IP) - // the default gateway should be the first ip in the subnet - gateway[len(gateway)-1]++ - - netInfo := new(networkInfo) - netInfo.Name = name - netInfo.Gateway = gateway.String() - netInfo.Subnet = ipNet.String() - netInfo.Machine = isMachine - - templ, err := template.New("network_template").Delims("{{{{", "}}}}").Parse(networkTemplate) - if err != nil { - return errors.Wrapf(err, "error compiling template for default network") - } - var output bytes.Buffer - if err := templ.Execute(&output, netInfo); err != nil { - return errors.Wrapf(err, "error executing template for default network") - } - - // Next, we need to place the config on disk. - // Loop through possible indexes, with a limit of 100 attempts. - created := false - for i := 87; i < 187; i++ { - configFile, err := os.OpenFile(filepath.Join(configDir, fmt.Sprintf("%d-%s.conflist", i, name)), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644) - if err != nil { - logrus.Infof("Attempt to create default CNI network config file failed: %v", err) - continue - } - defer configFile.Close() - - created = true - - // Success - file is open. Write our buffer to it. - if _, err := configFile.Write(output.Bytes()); err != nil { - return errors.Wrapf(err, "error writing default CNI config to file") - } - break - } - if !created { - return errors.Errorf("no available default network configuration file was found") - } - - return nil -} - -// Get the name of the configuration contained in a given conflist file. Accepts -// the full path of a .conflist CNI configuration. -func getConfigName(file string) (string, error) { - contents, err := ioutil.ReadFile(file) - if err != nil { - return "", err - } - config := new(network) - if err := json.Unmarshal(contents, config); err != nil { - return "", errors.Wrapf(err, "error decoding CNI configuration %s", filepath.Base(file)) - } - return config.Name, nil -} diff --git a/vendor/github.com/containers/common/pkg/download/download.go b/vendor/github.com/containers/common/pkg/download/download.go new file mode 100644 index 00000000000..abf4c87739e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/download/download.go @@ -0,0 +1,31 @@ +package download + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// FromURL downloads the specified source to a file in tmpdir (OS defaults if +// empty). 
+func FromURL(tmpdir, source string) (string, error) {
+	tmp, err := ioutil.TempFile(tmpdir, "")
+	if err != nil {
+		return "", fmt.Errorf("creating temporary download file: %w", err)
+	}
+	defer tmp.Close()
+
+	response, err := http.Get(source) // nolint:noctx
+	if err != nil {
+		return "", fmt.Errorf("downloading %s: %w", source, err)
+	}
+	defer response.Body.Close()
+
+	_, err = io.Copy(tmp, response.Body)
+	if err != nil {
+		return "", fmt.Errorf("copying %s to %s: %w", source, tmp.Name(), err)
+	}
+
+	return tmp.Name(), nil
+}
diff --git a/vendor/github.com/containers/common/pkg/flag/flag.go b/vendor/github.com/containers/common/pkg/flag/flag.go
new file mode 100644
index 00000000000..7d6b6a5343d
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/flag/flag.go
@@ -0,0 +1,174 @@
+package flag
+
+import (
+	"strconv"
+
+	"github.com/spf13/pflag"
+)
+
+// OptionalBool is a boolean with a separate presence flag and value.
+type OptionalBool struct {
+	present bool
+	value   bool
+}
+
+// Present returns the bool's presence flag.
+func (ob *OptionalBool) Present() bool {
+	return ob.present
+}
+
+// Value returns the bool's value. Should only be used if Present() is true.
+func (ob *OptionalBool) Value() bool {
+	return ob.value
+}
+
+// optionalBoolValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.Bool, except that it records whether the flag has been set.
+// This is distinct from OptionalBool to (pretend to) force callers to use
+// OptionalBoolFlag.
+type optionalBoolValue OptionalBool
+
+// OptionalBoolFlag creates a new flag for an OptionalBool in the specified flag
+// set, with the specified name and usage.
+func OptionalBoolFlag(fs *pflag.FlagSet, p *OptionalBool, name, usage string) *pflag.Flag {
+	flag := fs.VarPF(internalNewOptionalBoolValue(p), name, "", usage)
+	flag.NoOptDefVal = "true"
+	flag.DefValue = "false"
+	return flag
+}
+
+// WARNING: Do not use this method directly to define an optionalBool flag.
+// Callers should use OptionalBoolFlag.
+func internalNewOptionalBoolValue(p *OptionalBool) pflag.Value {
+	p.present = false
+	return (*optionalBoolValue)(p)
+}
+
+// Set parses the string to a bool and sets it.
+func (ob *optionalBoolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	ob.value = v
+	ob.present = true
+	return nil
+}
+
+// String returns the string representation of the bool.
+func (ob *optionalBoolValue) String() string {
+	if !ob.present {
+		return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
+	}
+	return strconv.FormatBool(ob.value)
+}
+
+// Type returns the type.
+func (ob *optionalBoolValue) Type() string {
+	return "bool"
+}
+
+// IsBoolFlag indicates that it's a bool flag.
+func (ob *optionalBoolValue) IsBoolFlag() bool {
+	return true
+}
+
+// OptionalString is a string with a separate presence flag.
+type OptionalString struct {
+	present bool
+	value   string
+}
+
+// Present returns the string's presence flag.
+func (os *OptionalString) Present() bool {
+	return os.present
+}
+
+// Value returns the string's value. Should only be used if Present() is true.
+func (os *OptionalString) Value() string {
+	return os.value
+}
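Editor's note (not part of the patch): two quick usage sketches for the new helpers above. First, the Optional* types exist to distinguish "the flag was never given" from a zero value; wiring them into pflag looks roughly like this (flag names are invented, and OptionalInt below follows the same pattern):

package main

import (
	"fmt"

	"github.com/containers/common/pkg/flag"
	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)

	var tlsVerify flag.OptionalBool
	flag.OptionalBoolFlag(fs, &tlsVerify, "tls-verify", "require HTTPS and verify certificates")

	var creds flag.OptionalString
	fs.Var(flag.NewOptionalStringValue(&creds), "creds", "registry credentials")

	// A bare --tls-verify parses as true thanks to NoOptDefVal; --creds is
	// never passed, so its presence flag stays false.
	if err := fs.Parse([]string{"--tls-verify"}); err != nil {
		panic(err)
	}
	fmt.Println(tlsVerify.Present(), tlsVerify.Value()) // true true
	fmt.Println(creds.Present())                        // false
}

Second, download.FromURL (introduced just before flag.go) leaves cleanup of the temporary file to the caller; the URL here is a placeholder:

package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/download"
)

func main() {
	// An empty tmpdir makes ioutil.TempFile fall back to the OS default
	// temporary directory, matching the doc comment above.
	path, err := download.FromURL("", "https://example.com/artifact.tar.gz")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer os.Remove(path) // FromURL never deletes the file; the caller must.
	fmt.Println("downloaded to", path)
}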
+// optionalStringValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.String, except that it records whether the flag has been set.
+// This is distinct from OptionalString to (pretend to) force callers to use
+// NewOptionalStringValue.
+type optionalStringValue OptionalString
+
+// NewOptionalStringValue returns a pflag.Value for the string.
+func NewOptionalStringValue(p *OptionalString) pflag.Value {
+	p.present = false
+	return (*optionalStringValue)(p)
+}
+
+// Set sets the string.
+func (ob *optionalStringValue) Set(s string) error {
+	ob.value = s
+	ob.present = true
+	return nil
+}
+
+// String returns the string if present.
+func (ob *optionalStringValue) String() string {
+	if !ob.present {
+		return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
+	}
+	return ob.value
+}
+
+// Type returns the string type.
+func (ob *optionalStringValue) Type() string {
+	return "string"
+}
+
+// OptionalInt is an int with a separate presence flag.
+type OptionalInt struct {
+	present bool
+	value   int
+}
+
+// Present returns the int's presence flag.
+func (oi *OptionalInt) Present() bool {
+	return oi.present
+}
+
+// Value returns the int's value. Should only be used if Present() is true.
+func (oi *OptionalInt) Value() int {
+	return oi.value
+}
+
+// optionalIntValue is a cli.Generic == flag.Value implementation equivalent to
+// the one underlying flag.Int, except that it records whether the flag has been set.
+// This is distinct from OptionalInt to (pretend to) force callers to use
+// NewOptionalIntValue.
+type optionalIntValue OptionalInt
+
+// NewOptionalIntValue returns the pflag.Value of the int.
+func NewOptionalIntValue(p *OptionalInt) pflag.Value {
+	p.present = false
+	return (*optionalIntValue)(p)
+}
+
+// Set parses the string to an int and sets it.
+func (ob *optionalIntValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, strconv.IntSize)
+	if err != nil {
+		return err
+	}
+	ob.value = int(v)
+	ob.present = true
+	return nil
+}
+
+// String returns the string representation of the int.
+func (ob *optionalIntValue) String() string {
+	if !ob.present {
+		return "" // If the value is not present, just return an empty string, any other value wouldn't make sense.
+	}
+	return strconv.Itoa(int(ob.value))
+}
+
+// Type returns the int's type.
+func (ob *optionalIntValue) Type() string { + return "int" +} diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go new file mode 100644 index 00000000000..465eeceaf1b --- /dev/null +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -0,0 +1,70 @@ +package machine + +import ( + "os" + "strings" + "sync" + + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +type MachineMarker struct { + Enabled bool + Type string +} + +const ( + markerFile = "/etc/containers/podman-machine" + Wsl = "wsl" + Qemu = "qemu" +) + +var ( + markerSync sync.Once + machineMarker *MachineMarker +) + +func loadMachineMarker(file string) { + var kind string + + // Support deprecated config value for compatibility + enabled := isLegacyConfigSet() + + if content, err := os.ReadFile(file); err == nil { + enabled = true + kind = strings.TrimSpace(string(content)) + } + + machineMarker = &MachineMarker{enabled, kind} +} + +func isLegacyConfigSet() bool { + config, err := config.Default() + if err != nil { + logrus.Warnf("could not obtain container configuration") + return false + } + + //nolint:staticcheck //lint:ignore SA1019 deprecated call + return config.Engine.MachineEnabled +} + +func IsPodmanMachine() bool { + return GetMachineMarker().Enabled +} + +func MachineHostType() string { + return GetMachineMarker().Type +} + +func IsGvProxyBased() bool { + return IsPodmanMachine() && MachineHostType() != Wsl +} + +func GetMachineMarker() *MachineMarker { + markerSync.Do(func() { + loadMachineMarker(markerFile) + }) + return machineMarker +} diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index ea9495ee73a..75ffac06c36 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -16,31 +16,22 @@ import ( type List interface { AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, os, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error Remove(instanceDigest digest.Digest) error - SetURLs(instanceDigest digest.Digest, urls []string) error URLs(instanceDigest digest.Digest) ([]string, error) - SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error Annotations(instanceDigest *digest.Digest) (map[string]string, error) - SetOS(instanceDigest digest.Digest, os string) error OS(instanceDigest digest.Digest) (string, error) - SetArchitecture(instanceDigest digest.Digest, arch string) error Architecture(instanceDigest digest.Digest) (string, error) - SetOSVersion(instanceDigest digest.Digest, osVersion string) error OSVersion(instanceDigest digest.Digest) (string, error) - SetVariant(instanceDigest digest.Digest, variant string) error Variant(instanceDigest digest.Digest) (string, error) - SetFeatures(instanceDigest digest.Digest, features []string) error Features(instanceDigest digest.Digest) ([]string, error) - SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error OSFeatures(instanceDigest digest.Digest) ([]string, error) - Serialize(mimeType string) ([]byte, error) Instances() []digest.Digest OCIv1() *v1.Index @@ -74,13 +65,14 @@ func Create() List { }, oci: v1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, }, } } // AddInstance adds an entry for the specified 
manifest digest, with assorted // additional information specified in parameters, to the list or index. -func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features []string, annotations []string) error { +func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { if err := l.Remove(manifestDigest); err != nil && !os.IsNotExist(errors.Cause(err)) { return err } @@ -373,6 +365,7 @@ func FromBlob(manifestBytes []byte) (List, error) { }, oci: v1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, }, } switch manifestType { @@ -449,38 +442,37 @@ func (l *list) preferOCI() bool { // Serialize encodes the list using the specified format, or by selecting one // which it thinks is appropriate. func (l *list) Serialize(mimeType string) ([]byte, error) { - var manifestBytes []byte + var ( + res []byte + err error + ) switch mimeType { case "": if l.preferOCI() { - manifest, err := json.Marshal(&l.oci) + res, err = json.Marshal(&l.oci) if err != nil { return nil, errors.Wrapf(err, "error marshalling OCI image index") } - manifestBytes = manifest } else { - manifest, err := json.Marshal(&l.docker) + res, err = json.Marshal(&l.docker) if err != nil { return nil, errors.Wrapf(err, "error marshalling Docker manifest list") } - manifestBytes = manifest } case v1.MediaTypeImageIndex: - manifest, err := json.Marshal(&l.oci) + res, err = json.Marshal(&l.oci) if err != nil { return nil, errors.Wrapf(err, "error marshalling OCI image index") } - manifestBytes = manifest case manifest.DockerV2ListMediaType: - manifest, err := json.Marshal(&l.docker) + res, err = json.Marshal(&l.docker) if err != nil { return nil, errors.Wrapf(err, "error marshalling Docker manifest list") } - manifestBytes = manifest default: return nil, errors.Wrapf(ErrManifestTypeNotSupported, "serializing list to type %q not implemented", mimeType) } - return manifestBytes, nil + return res, nil } // Instances returns the list of image instances mentioned in this list. diff --git a/vendor/github.com/containers/podman/v3/pkg/netns/netns_linux.go b/vendor/github.com/containers/common/pkg/netns/netns_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/netns/netns_linux.go rename to vendor/github.com/containers/common/pkg/netns/netns_linux.go index 3e6e668b5fe..b3459b21373 100644 --- a/vendor/github.com/containers/podman/v3/pkg/netns/netns_linux.go +++ b/vendor/github.com/containers/common/pkg/netns/netns_linux.go @@ -29,8 +29,8 @@ import ( "sync" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/common/pkg/util" + "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -38,7 +38,7 @@ import ( // GetNSRunDir returns the dir of where to create the netNS. When running // rootless, it needs to be at a location writable by user. 
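Editor's note (not part of the patch): with the hunks above, manifests.Create() stamps the OCI index's mediaType at creation time, so a round trip through Serialize now emits it explicitly. A sketch using stand-in digest and size values:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/manifests"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	list := manifests.Create()

	// Stand-in digest and size; real callers take these from an image copy.
	d := digest.FromString("example manifest")
	if err := list.AddInstance(d, 1234, v1.MediaTypeImageManifest,
		"linux", "amd64", "", nil, "", nil, nil); err != nil {
		panic(err)
	}

	// Requesting the OCI form now yields an index whose mediaType field is
	// populated ("application/vnd.oci.image.index.v1+json").
	raw, err := list.Serialize(v1.MediaTypeImageIndex)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}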
func GetNSRunDir() (string, error) { - if rootless.IsRootless() { + if unshare.IsRootless() { rootlessDir, err := util.GetRuntimeDir() if err != nil { return "", err @@ -56,7 +56,7 @@ func NewNS() (ns.NetNS, error) { if err != nil { return nil, fmt.Errorf("failed to generate random netns name: %v", err) } - nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + nsName := fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) return NewNSWithName(nsName) } @@ -71,7 +71,7 @@ func NewNSWithName(name string) (ns.NetNS, error) { // Create the directory for mounting network namespaces // This needs to be a shared mountpoint in case it is mounted in to // other namespaces (containers) - err = os.MkdirAll(nsRunDir, 0755) + err = os.MkdirAll(nsRunDir, 0o755) if err != nil { return nil, err } @@ -152,7 +152,7 @@ func NewNSWithName(name string) (ns.NetNS, error) { // Put this thread back to the orig ns, since it might get reused (pre go1.10) defer func() { if err := origNS.Set(); err != nil { - if rootless.IsRootless() && strings.Contains(err.Error(), "operation not permitted") { + if unshare.IsRootless() && strings.Contains(err.Error(), "operation not permitted") { // When running in rootless mode it will fail to re-join // the network namespace owned by root on the host. return @@ -180,13 +180,13 @@ func NewNSWithName(name string) (ns.NetNS, error) { } // UnmountNS unmounts the NS held by the netns object -func UnmountNS(ns ns.NetNS) error { +func UnmountNS(netns ns.NetNS) error { nsRunDir, err := GetNSRunDir() if err != nil { return err } - nsPath := ns.Path() + nsPath := netns.Path() // Only unmount if it's been bind-mounted (don't touch namespaces in /proc...) if strings.HasPrefix(nsPath, nsRunDir) { if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil { diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 02e670c50c8..6c4958cc2b6 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -14,9 +14,31 @@ import ( // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int finalOpts := make([]string, 0, len(options)) for _, opt := range options { + // support advanced options like upperdir=/path, workdir=/path + if strings.Contains(opt, "upperdir") { + foundUpperDir++ + if foundUpperDir > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", ")) + } + finalOpts = append(finalOpts, opt) + continue + } + if strings.Contains(opt, "workdir") { + foundWorkDir++ + if foundWorkDir > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", ")) + } + finalOpts = append(finalOpts, opt) + continue + } + if strings.HasPrefix(opt, "idmap") { + finalOpts = append(finalOpts, opt) + continue + } + switch opt { case "noexec", "exec": foundExec++ @@ -119,7 +141,7 @@ func Device(device string) (src, dest, permissions string, err error) { // isValidDeviceMode checks if the mode for device is valid or not. // isValid mode is a composition of r (read), w (write), and m (mknod). 
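Editor's note (not part of the patch): the upperdir/workdir handling added to ValidateVolumeOpts above can be exercised directly; a short sketch, with placeholder paths:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/parse"
)

func main() {
	// One upperdir and one workdir per overlay are now accepted.
	opts, err := parse.ValidateVolumeOpts([]string{"ro", "upperdir=/tmp/upper", "workdir=/tmp/work"})
	if err != nil {
		panic(err)
	}
	fmt.Println(opts)

	// A second occurrence of either option is rejected.
	_, err = parse.ValidateVolumeOpts([]string{"upperdir=/a", "upperdir=/b"})
	fmt.Println(err) // invalid options ..., can only specify 1 upperdir per overlay
}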
func isValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ + legalDeviceMode := map[rune]bool{ 'r': true, 'w': true, 'm': true, diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go index ce4446a1b00..d087c4a02b6 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package parse diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index 43e3a668882..a9573e4e8a8 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -45,8 +45,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions func isRetryable(err error) bool { err = errors.Cause(err) - if err == context.Canceled || err == context.DeadlineExceeded { + switch err { + case nil: return false + case context.Canceled, context.DeadlineExceeded: + return false + default: // continue } type unwrapper interface { @@ -57,7 +61,8 @@ func isRetryable(err error) bool { case errcode.Error: switch e.Code { - case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied, + errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: return false } return true @@ -86,7 +91,7 @@ func isRetryable(err error) bool { } } return true - case unwrapper: + case unwrapper: // Test this last, because various error types might implement .Unwrap() err = e.Unwrap() return isRetryable(err) } diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go index 6769809755b..901e28a5dcc 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package retry diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index cf333744c53..3712afc71f1 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -80,6 +80,7 @@ func DefaultProfile() *Seccomp { "vmsplice", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, }, @@ -168,6 +169,7 @@ func DefaultProfile() *Seccomp { "futex", "futex_time64", "futimesat", + "get_mempolicy", "get_robust_list", "get_thread_area", "getcpu", @@ -183,7 +185,6 @@ func DefaultProfile() *Seccomp { "getgroups", "getgroups32", "getitimer", - "get_mempolicy", "getpeername", "getpgid", "getpgrp", @@ -235,6 +236,7 @@ func DefaultProfile() *Seccomp { "lstat64", "madvise", "mbind", + "membarrier", "memfd_create", "memfd_secret", "mincore", @@ -248,6 +250,7 @@ func DefaultProfile() *Seccomp { "mmap", "mmap2", "mount", + "mount_setattr", "move_mount", "mprotect", "mq_getsetattr", @@ -271,9 +274,9 @@ func DefaultProfile() *Seccomp { "nanosleep", "newfstatat", "open", + "open_tree", "openat", "openat2", - "open_tree", "pause", "pidfd_getfd", "pidfd_open", @@ -292,8 +295,12 @@ func DefaultProfile() *Seccomp { "preadv", "preadv2", "prlimit64", + "process_mrelease", + "process_vm_readv", + 
"process_vm_writev", "pselect6", "pselect6_time64", + "ptrace", "pwrite64", "pwritev", "pwritev2", @@ -352,7 +359,6 @@ func DefaultProfile() *Seccomp { "sendmmsg", "sendmsg", "sendto", - "setns", "set_mempolicy", "set_robust_list", "set_thread_area", @@ -366,6 +372,7 @@ func DefaultProfile() *Seccomp { "setgroups", "setgroups32", "setitimer", + "setns", "setpgid", "setpriority", "setregid", @@ -387,10 +394,15 @@ func DefaultProfile() *Seccomp { "shmdt", "shmget", "shutdown", + "sigaction", "sigaltstack", + "signal", "signalfd", "signalfd4", + "sigpending", + "sigprocmask", "sigreturn", + "sigsuspend", "socketcall", "socketpair", "splice", @@ -404,6 +416,7 @@ func DefaultProfile() *Seccomp { "sync", "sync_file_range", "syncfs", + "syscall", "sysinfo", "syslog", "tee", @@ -416,6 +429,7 @@ func DefaultProfile() *Seccomp { "timer_gettime64", "timer_settime", "timer_settime64", + "timerfd", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", @@ -516,10 +530,10 @@ func DefaultProfile() *Seccomp { Names: []string{ "arm_fadvise64_64", "arm_sync_file_range", - "sync_file_range2", "breakpoint", "cacheflush", "set_tls", + "sync_file_range2", }, Action: ActAllow, Args: []*Arg{}, @@ -574,6 +588,7 @@ func DefaultProfile() *Seccomp { "open_by_handle_at", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -609,6 +624,7 @@ func DefaultProfile() *Seccomp { "setns", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -630,6 +646,7 @@ func DefaultProfile() *Seccomp { "chroot", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -639,8 +656,8 @@ func DefaultProfile() *Seccomp { { Names: []string{ "delete_module", - "init_module", "finit_module", + "init_module", "query_module", }, Action: ActAllow, @@ -652,11 +669,12 @@ func DefaultProfile() *Seccomp { { Names: []string{ "delete_module", - "init_module", "finit_module", + "init_module", "query_module", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -678,6 +696,7 @@ func DefaultProfile() *Seccomp { "acct", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -688,9 +707,6 @@ func DefaultProfile() *Seccomp { Names: []string{ "kcmp", "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace", }, Action: ActAllow, Args: []*Arg{}, @@ -702,11 +718,9 @@ func DefaultProfile() *Seccomp { Names: []string{ "kcmp", "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -715,8 +729,8 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "iopl", "ioperm", + "iopl", }, Action: ActAllow, Args: []*Arg{}, @@ -726,10 +740,11 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "iopl", "ioperm", + "iopl", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -738,10 +753,10 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "settimeofday", - "stime", "clock_settime", "clock_settime64", + "settimeofday", + "stime", }, Action: ActAllow, Args: []*Arg{}, @@ -751,12 +766,13 @@ func DefaultProfile() *Seccomp { }, { Names: []string{ - "settimeofday", - "stime", "clock_settime", "clock_settime64", + "settimeofday", + "stime", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -778,6 +794,7 @@ func DefaultProfile() *Seccomp { "vhangup", }, 
Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -789,6 +806,7 @@ func DefaultProfile() *Seccomp { "socket", }, Action: ActErrno, + Errno: "EINVAL", ErrnoRet: &einval, Args: []*Arg{ { @@ -867,6 +885,7 @@ func DefaultProfile() *Seccomp { return &Seccomp{ DefaultAction: ActErrno, + DefaultErrno: "ENOSYS", DefaultErrnoRet: &enosys, ArchMap: arches(), Syscalls: syscalls, diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go new file mode 100644 index 00000000000..87ac2ab77ac --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -0,0 +1,94 @@ +//go:build linux && seccomp +// +build linux,seccomp + +package seccomp + +import ( + "golang.org/x/sys/unix" +) + +// Error table +var errnoArch = map[string]uint{ + "EPERM": uint(unix.EPERM), + "ENOENT": uint(unix.ENOENT), + "ESRCH": uint(unix.ESRCH), + "EIO": uint(unix.EIO), + "ENXIO": uint(unix.ENXIO), + "E2BIG": uint(unix.E2BIG), + "ENOEXEC": uint(unix.ENOEXEC), + "EBADF": uint(unix.EBADF), + "ECHILD": uint(unix.ECHILD), + "EDEADLK": uint(unix.EDEADLK), + "ENOMEM": uint(unix.ENOMEM), + "EACCES": uint(unix.EACCES), + "EFAULT": uint(unix.EFAULT), + "ENOTBLK": uint(unix.ENOTBLK), + "EBUSY": uint(unix.EBUSY), + "EEXIST": uint(unix.EEXIST), + "EXDEV": uint(unix.EXDEV), + "ENODEV": uint(unix.ENODEV), + "ENOTDIR": uint(unix.ENOTDIR), + "EISDIR": uint(unix.EISDIR), + "EINVAL": uint(unix.EINVAL), + "ENFILE": uint(unix.ENFILE), + "EMFILE": uint(unix.EMFILE), + "ENOTTY": uint(unix.ENOTTY), + "ETXTBSY": uint(unix.ETXTBSY), + "EFBIG": uint(unix.EFBIG), + "ENOSPC": uint(unix.ENOSPC), + "ESPIPE": uint(unix.ESPIPE), + "EROFS": uint(unix.EROFS), + "EMLINK": uint(unix.EMLINK), + "EPIPE": uint(unix.EPIPE), + "EDOM": uint(unix.EDOM), + "ERANGE": uint(unix.ERANGE), + "EAGAIN": uint(unix.EAGAIN), + "EINPROGRESS": uint(unix.EINPROGRESS), + "EALREADY": uint(unix.EALREADY), + "ENOTSOCK": uint(unix.ENOTSOCK), + "EDESTADDRREQ": uint(unix.EDESTADDRREQ), + "EMSGSIZE": uint(unix.EMSGSIZE), + "EPROTOTYPE": uint(unix.EPROTOTYPE), + "ENOPROTOOPT": uint(unix.ENOPROTOOPT), + "EPROTONOSUPPORT": uint(unix.EPROTONOSUPPORT), + "ESOCKTNOSUPPORT": uint(unix.ESOCKTNOSUPPORT), + "EOPNOTSUPP": uint(unix.EOPNOTSUPP), + "EPFNOSUPPORT": uint(unix.EPFNOSUPPORT), + "EAFNOSUPPORT": uint(unix.EAFNOSUPPORT), + "EADDRINUSE": uint(unix.EADDRINUSE), + "EADDRNOTAVAIL": uint(unix.EADDRNOTAVAIL), + "ENETDOWN": uint(unix.ENETDOWN), + "ENETUNREACH": uint(unix.ENETUNREACH), + "ENETRESET": uint(unix.ENETRESET), + "ECONNABORTED": uint(unix.ECONNABORTED), + "ECONNRESET": uint(unix.ECONNRESET), + "ENOBUFS": uint(unix.ENOBUFS), + "EISCONN": uint(unix.EISCONN), + "ENOTCONN": uint(unix.ENOTCONN), + "ESHUTDOWN": uint(unix.ESHUTDOWN), + "ETOOMANYREFS": uint(unix.ETOOMANYREFS), + "ETIMEDOUT": uint(unix.ETIMEDOUT), + "ECONNREFUSED": uint(unix.ECONNREFUSED), + "ELOOP": uint(unix.ELOOP), + "ENAMETOOLONG": uint(unix.ENAMETOOLONG), + "EHOSTDOWN": uint(unix.EHOSTDOWN), + "EHOSTUNREACH": uint(unix.EHOSTUNREACH), + "ENOTEMPTY": uint(unix.ENOTEMPTY), + "EUSERS": uint(unix.EUSERS), + "EDQUOT": uint(unix.EDQUOT), + "ESTALE": uint(unix.ESTALE), + "EREMOTE": uint(unix.EREMOTE), + "ENOLCK": uint(unix.ENOLCK), + "ENOSYS": uint(unix.ENOSYS), + "EILSEQ": uint(unix.EILSEQ), + "ENOMEDIUM": uint(unix.ENOMEDIUM), + "EMEDIUMTYPE": uint(unix.EMEDIUMTYPE), + "EOVERFLOW": uint(unix.EOVERFLOW), + "ECANCELED": uint(unix.ECANCELED), + "EIDRM": uint(unix.EIDRM), + "ENOMSG": uint(unix.ENOMSG), 
+ "ENOTSUP": uint(unix.ENOTSUP), + "EBADMSG": uint(unix.EBADMSG), + "ENOTRECOVERABLE": uint(unix.ENOTRECOVERABLE), + "EOWNERDEAD": uint(unix.EOWNERDEAD), +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go index 90da99f0a45..5c278574cff 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/filter.go +++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp // NOTE: this package has originally been copied from diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index c009134e3cc..442632e7d11 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -1,6 +1,7 @@ { "defaultAction": "SCMP_ACT_ERRNO", "defaultErrnoRet": 38, + "defaultErrno": "ENOSYS", "archMap": [ { "architecture": "SCMP_ARCH_X86_64", @@ -87,7 +88,8 @@ "comment": "", "includes": {}, "excludes": {}, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -174,6 +176,7 @@ "futex", "futex_time64", "futimesat", + "get_mempolicy", "get_robust_list", "get_thread_area", "getcpu", @@ -189,7 +192,6 @@ "getgroups", "getgroups32", "getitimer", - "get_mempolicy", "getpeername", "getpgid", "getpgrp", @@ -241,6 +243,7 @@ "lstat64", "madvise", "mbind", + "membarrier", "memfd_create", "memfd_secret", "mincore", @@ -254,6 +257,7 @@ "mmap", "mmap2", "mount", + "mount_setattr", "move_mount", "mprotect", "mq_getsetattr", @@ -277,9 +281,9 @@ "nanosleep", "newfstatat", "open", + "open_tree", "openat", "openat2", - "open_tree", "pause", "pidfd_getfd", "pidfd_open", @@ -298,8 +302,12 @@ "preadv", "preadv2", "prlimit64", + "process_mrelease", + "process_vm_readv", + "process_vm_writev", "pselect6", "pselect6_time64", + "ptrace", "pwrite64", "pwritev", "pwritev2", @@ -358,7 +366,6 @@ "sendmmsg", "sendmsg", "sendto", - "setns", "set_mempolicy", "set_robust_list", "set_thread_area", @@ -372,6 +379,7 @@ "setgroups", "setgroups32", "setitimer", + "setns", "setpgid", "setpriority", "setregid", @@ -393,10 +401,15 @@ "shmdt", "shmget", "shutdown", + "sigaction", "sigaltstack", + "signal", "signalfd", "signalfd4", + "sigpending", + "sigprocmask", "sigreturn", + "sigsuspend", "socketcall", "socketpair", "splice", @@ -410,6 +423,7 @@ "sync", "sync_file_range", "syncfs", + "syscall", "sysinfo", "syslog", "tee", @@ -422,6 +436,7 @@ "timer_gettime64", "timer_settime", "timer_settime64", + "timerfd", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", @@ -559,10 +574,10 @@ "names": [ "arm_fadvise64_64", "arm_sync_file_range", - "sync_file_range2", "breakpoint", "cacheflush", - "set_tls" + "set_tls", + "sync_file_range2" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -650,7 +665,8 @@ "CAP_DAC_READ_SEARCH" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -693,7 +709,8 @@ "CAP_SYS_ADMIN" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -722,13 +739,14 @@ "CAP_SYS_CHROOT" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ "delete_module", - "init_module", "finit_module", + "init_module", "query_module" ], "action": "SCMP_ACT_ALLOW", @@ -744,8 +762,8 @@ { "names": [ "delete_module", - "init_module", "finit_module", + "init_module", "query_module" ], "action": "SCMP_ACT_ERRNO", @@ -757,7 +775,8 @@ "CAP_SYS_MODULE" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": 
"EPERM" }, { "names": [ @@ -786,15 +805,13 @@ "CAP_SYS_PACCT" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ "kcmp", - "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace" + "process_madvise" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -809,10 +826,7 @@ { "names": [ "kcmp", - "process_madvise", - "process_vm_readv", - "process_vm_writev", - "ptrace" + "process_madvise" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -823,12 +837,13 @@ "CAP_SYS_PTRACE" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ - "iopl", - "ioperm" + "ioperm", + "iopl" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -842,8 +857,8 @@ }, { "names": [ - "iopl", - "ioperm" + "ioperm", + "iopl" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -854,14 +869,15 @@ "CAP_SYS_RAWIO" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ - "settimeofday", - "stime", "clock_settime", - "clock_settime64" + "clock_settime64", + "settimeofday", + "stime" ], "action": "SCMP_ACT_ALLOW", "args": [], @@ -875,10 +891,10 @@ }, { "names": [ - "settimeofday", - "stime", "clock_settime", - "clock_settime64" + "clock_settime64", + "settimeofday", + "stime" ], "action": "SCMP_ACT_ERRNO", "args": [], @@ -889,7 +905,8 @@ "CAP_SYS_TIME" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -918,7 +935,8 @@ "CAP_SYS_TTY_CONFIG" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -946,7 +964,8 @@ "CAP_AUDIT_WRITE" ] }, - "errnoRet": 22 + "errnoRet": 22, + "errno": "EINVAL" }, { "names": [ diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go index af36b9990b5..f7adde8aba0 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp // SPDX-License-Identifier: Apache-2.0 @@ -10,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "strconv" "github.com/opencontainers/runtime-spec/specs-go" libseccomp "github.com/seccomp/libseccomp-golang" @@ -66,6 +68,37 @@ func inSlice(slice []string, s string) bool { return false } +func getArchitectures(config *Seccomp, newConfig *specs.LinuxSeccomp) error { + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + return nil +} + +func getErrno(errno string, def *uint) (*uint, error) { + if errno == "" { + return def, nil + } + v, err := strconv.ParseUint(errno, 10, 32) + if err == nil { + v2 := uint(v) + return &v2, nil + } + + v2, found := errnoArch[errno] + if !found { + return nil, fmt.Errorf("unknown errno %s", errno) + } + return &v2, nil +} + func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { if config == nil { return nil, nil @@ -79,22 +112,22 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) newConfig := &specs.LinuxSeccomp{} var arch string - var native, err = libseccomp.GetNativeArch() + native, err := libseccomp.GetNativeArch() if err == nil { arch = native.String() } - if 
len(config.Architectures) != 0 && len(config.ArchMap) != 0 { - return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + if err := getArchitectures(config, newConfig); err != nil { + return nil, err } - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) != 0 { - for _, a := range config.Architectures { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) - } + for _, flag := range config.Flags { + newConfig.Flags = append(newConfig.Flags, specs.LinuxSeccompFlag(flag)) } + newConfig.ListenerPath = config.ListenerPath + newConfig.ListenerMetadata = config.ListenerMetadata + if len(config.ArchMap) != 0 { for _, a := range config.ArchMap { seccompArch, ok := nativeToSeccomp[arch] @@ -111,7 +144,11 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) } newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) - newConfig.DefaultErrnoRet = config.DefaultErrnoRet + + newConfig.DefaultErrnoRet, err = getErrno(config.DefaultErrno, config.DefaultErrnoRet) + if err != nil { + return nil, err + } Loop: // Loop through all syscall blocks and convert them to libcontainer format after filtering them @@ -145,12 +182,17 @@ Loop: return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") } + errno, err := getErrno(call.Errno, call.ErrnoRet) + if err != nil { + return nil, err + } + if call.Name != "" { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet)) + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, errno)) } if len(call.Names) > 0 { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet)) + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, errno)) } } diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go index 8b23ee2c04d..da5230c563f 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !seccomp // +build !linux !seccomp // SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go index 86e1b66bbe3..f8a20e5364a 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/supported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go @@ -1,3 +1,4 @@ +//go:build linux && seccomp // +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go index 07751f72907..784b1ba8dd6 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/types.go +++ b/vendor/github.com/containers/common/pkg/seccomp/types.go @@ -6,13 +6,20 @@ package seccomp // Seccomp represents the config for a seccomp profile for syscall restriction. 
type Seccomp struct { - DefaultAction Action `json:"defaultAction"` + DefaultAction Action `json:"defaultAction"` + + // DefaultErrnoRet is obsolete, please use DefaultErrno DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"` + DefaultErrno string `json:"defaultErrno,omitempty"` + // Architectures is kept to maintain backward compatibility with the old // seccomp profile. - Architectures []Arch `json:"architectures,omitempty"` - ArchMap []Architecture `json:"archMap,omitempty"` - Syscalls []*Syscall `json:"syscalls"` + Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` + Flags []string `json:"flags,omitempty"` + ListenerPath string `json:"listenerPath,omitempty"` + ListenerMetadata string `json:"listenerMetadata,omitempty"` } // Architecture is used to represent a specific architecture @@ -107,5 +114,7 @@ type Syscall struct { Comment string `json:"comment"` Includes Filter `json:"includes"` Excludes Filter `json:"excludes"` - ErrnoRet *uint `json:"errnoRet,omitempty"` + // ErrnoRet is obsolete, please use Errno + ErrnoRet *uint `json:"errnoRet,omitempty"` + Errno string `json:"errno,omitempty"` } diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go index 1c5c4edc64e..669ab04a2ec 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/validate.go +++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go @@ -1,3 +1,4 @@ +//go:build seccomp // +build seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 80fcf54583d..e0c275851f5 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -34,7 +34,7 @@ func NewDriver(rootPath string) (*Driver, error) { fileDriver := new(Driver) fileDriver.secretsDataFilePath = filepath.Join(rootPath, secretsDataFile) // the lockfile functions require that the rootPath dir is executable - if err := os.MkdirAll(rootPath, 0700); err != nil { + if err := os.MkdirAll(rootPath, 0o700); err != nil { return nil, err } @@ -95,7 +95,7 @@ func (d *Driver) Store(id string, data []byte) error { if err != nil { return err } - err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0600) + err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0o600) if err != nil { return err } @@ -119,7 +119,7 @@ func (d *Driver) Delete(id string) error { if err != nil { return err } - err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0600) + err = ioutil.WriteFile(d.secretsDataFilePath, marshalled, 0o600) if err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go index 6dc00b34c85..50967b7cfbf 100644 --- a/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/passdriver/passdriver.go @@ -30,6 +30,8 @@ type driverConfig struct { Root string // KeyID contains the key id that will be used for encryption (i.e. 
user@domain.tld) KeyID string + // GPGHomedir is the homedir where the GPG keys are stored + GPGHomedir string } func (cfg *driverConfig) ParseOpts(opts map[string]string) { @@ -40,6 +42,9 @@ func (cfg *driverConfig) ParseOpts(opts map[string]string) { if val, ok := opts["key"]; ok { cfg.KeyID = val } + if val, ok := opts["gpghomedir"]; ok { + cfg.GPGHomedir = val + } } func defaultDriverConfig() *driverConfig { @@ -156,6 +161,9 @@ func (d *Driver) Delete(id string) error { } func (d *Driver) gpg(ctx context.Context, in io.Reader, out io.Writer, args ...string) error { + if d.GPGHomedir != "" { + args = append([]string{"--homedir", d.GPGHomedir}, args...) + } cmd := exec.CommandContext(ctx, "gpg", args...) cmd.Env = os.Environ() cmd.Stdin = in diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index aea983cb15a..4a04e2b2fab 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -102,7 +102,7 @@ func NewManager(rootPath string) (*SecretsManager, error) { return nil, errors.Wrapf(errInvalidPath, "path must be absolute: %s", rootPath) } // the lockfile functions require that the rootPath dir is executable - if err := os.MkdirAll(rootPath, 0700); err != nil { + if err := os.MkdirAll(rootPath, 0o700); err != nil { return nil, err } @@ -237,7 +237,6 @@ func (s *SecretsManager) List() ([]Secret, error) { var ls []Secret for _, v := range secrets { ls = append(ls, v) - } return ls, nil } diff --git a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go index 0c4929995b2..4d2ca0fca43 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go +++ b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go @@ -177,7 +177,7 @@ func (s *SecretsManager) store(entry *Secret) error { if err != nil { return err } - err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0600) + err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0o600) if err != nil { return err } @@ -203,7 +203,7 @@ func (s *SecretsManager) delete(nameOrID string) error { if err != nil { return err } - err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0600) + err = ioutil.WriteFile(s.secretsDBPath, marshalled, 0o600) if err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go index 22aacb1ce9e..8eac200f7d5 100644 --- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go @@ -3,12 +3,12 @@ package shelldriver import ( "bytes" "context" + "fmt" "os" "os/exec" "sort" "strings" - "github.com/mitchellh/mapstructure" "github.com/pkg/errors" ) @@ -27,22 +27,33 @@ var ( type driverConfig struct { // DeleteCommand contains a shell command that deletes a secret. // The secret id is provided as environment variable SECRET_ID - DeleteCommand string `mapstructure:"delete"` + DeleteCommand string // ListCommand contains a shell command that lists all secrets. // The output is expected to be one id per line - ListCommand string `mapstructure:"list"` + ListCommand string // LookupCommand contains a shell command that retrieves a secret. 
 	// The secret id is provided as environment variable SECRET_ID
-	LookupCommand string `mapstructure:"lookup"`
+	LookupCommand string
 	// StoreCommand contains a shell command that stores a secret.
 	// The secret id is provided as environment variable SECRET_ID
-	// The secret value itself is provied over stdin
-	StoreCommand string `mapstructure:"store"`
+	// The secret value itself is provided over stdin
+	StoreCommand string
 }
 
 func (cfg *driverConfig) ParseOpts(opts map[string]string) error {
-	if err := mapstructure.Decode(opts, cfg); err != nil {
-		return err
+	for key, value := range opts {
+		switch key {
+		case "delete":
+			cfg.DeleteCommand = value
+		case "list":
+			cfg.ListCommand = value
+		case "lookup":
+			cfg.LookupCommand = value
+		case "store":
+			cfg.StoreCommand = value
+		default:
+			return fmt.Errorf("invalid shell driver option: %q", key)
+		}
 	}
 
 	if cfg.DeleteCommand == "" ||
 		cfg.ListCommand == "" ||
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go
index 305b9d21f41..21e09c9fef0 100644
--- a/vendor/github.com/containers/common/pkg/signal/signal_linux.go
+++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go
@@ -1,5 +1,5 @@
-// +build linux
-// +build !mips,!mipsle,!mips64,!mips64le
+//go:build linux && !mips && !mipsle && !mips64 && !mips64le
+// +build linux,!mips,!mipsle,!mips64,!mips64le
 
 // Signal handling for Linux only.
 package signal
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
index 45c9d5af1e6..52b07aaf463 100644
--- a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
+++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
@@ -1,3 +1,4 @@
+//go:build linux && (mips || mipsle || mips64 || mips64le)
 // +build linux
 // +build mips mipsle mips64 mips64le
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
index 9d1733c02d5..0e8685a7c56 100644
--- a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 // Signal handling for Linux only.
diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
index 6c9321e7357..4410292a773 100644
--- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
+++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
@@ -149,14 +149,15 @@ func getMountsMap(path string) (string, string, error) { //nolint
 // MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem
 // mountLabel: MAC/SELinux label for container content
-// containerWorkingDir: Private data for storing subscriptions on the host mounted in container.
+// containerRunDir: Private data for storing subscriptions on the host mounted in container.
 // mountFile: Additional mount points required for the container.
-// mountPoint: Container image mountpoint
+// mountPoint: Container image mountpoint, or the directory from the host's perspective that
+//             corresponds to `/` in the container.
 // uid: to assign to content created for subscriptions
 // gid: to assign to content created for subscriptions
 // rootless: indicates whether container is running in rootless mode
 // disableFips: indicates whether system should ignore fips mode
-func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount {
+func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount {
 	var (
 		subscriptionMounts []rspec.Mount
 		mountFiles         []string
@@ -174,7 +175,7 @@
 	}
 	for _, file := range mountFiles {
 		if _, err := os.Stat(file); err == nil {
-			mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerWorkingDir, uid, gid)
+			mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid)
 			if err != nil {
 				logrus.Warnf("Failed to mount subscriptions, skipping entry in %s: %v", file, err)
 			}
@@ -191,7 +192,7 @@
 		_, err := os.Stat("/etc/system-fips")
 		switch {
 		case err == nil:
-			if err := addFIPSModeSubscription(&subscriptionMounts, containerWorkingDir, mountPoint, mountLabel, uid, gid); err != nil {
+			if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil {
 				logrus.Errorf("Adding FIPS mode subscription to container: %v", err)
 			}
 		case os.IsNotExist(err):
@@ -210,7 +211,7 @@ func rchown(chowndir string, uid, gid int) error {
 
 // addSubscriptionsFromMountsFile copies the contents of host directory to container directory
 // and returns a list of mounts
-func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir string, uid, gid int) ([]rspec.Mount, error) {
+func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) {
 	var mounts []rspec.Mount
 	defaultMountsPaths := getMounts(filePath)
 	for _, path := range defaultMountsPaths {
@@ -228,7 +229,7 @@
 			return nil, err
 		}
 
-		ctrDirOrFileOnHost := filepath.Join(containerWorkingDir, ctrDirOrFile)
+		ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile)
 
 		// In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost
 		_, err = os.Stat(ctrDirOrFileOnHost)
@@ -261,7 +262,6 @@
 		data, err := readFileOrDir("", hostDirOrFile, mode.Perm())
 		if err != nil {
 			return nil, err
-
 		}
 		for _, s := range data {
 			if err := os.MkdirAll(filepath.Dir(ctrDirOrFileOnHost), s.dirMode); err != nil {
@@ -300,15 +300,19 @@
 	return mounts, nil
 }
 
-// addFIPSModeSubscription creates /run/secrets/system-fips in the container
-// root filesystem if /etc/system-fips exists on hosts.
-// This enables the container to be FIPS compliant and run openssl in
-// FIPS mode as the host is also in FIPS mode.
+// addFIPSModeSubscription adds mounts to the `mounts` slice that are needed for the container to run openssl in FIPS mode
+// (i.e., be FIPS compliant).
+// It should only be called if /etc/system-fips exists on host.
+// It primarily does two things:
+// - creates /run/secrets/system-fips in the container root filesystem, and adds it to the `mounts` slice.
+// - If `/etc/crypto-policies/back-ends` already exists inside the container, it creates
+//   `/usr/share/crypto-policies/back-ends/FIPS` inside the container as well.
+// It is done from within the container to avoid policy incompatibility between the container and host.
+func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error {
 	subscriptionsDir := "/run/secrets"
-	ctrDirOnHost := filepath.Join(containerWorkingDir, subscriptionsDir)
+	ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir)
 	if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) {
-		if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint
+		if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint
 			return err
 		}
 		if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil {
@@ -322,7 +326,7 @@
 		if err != nil {
 			return errors.Wrap(err, "creating system-fips file in container for FIPS mode")
 		}
-		defer file.Close()
+		file.Close()
 	}
 
 	if !mountExists(*mounts, subscriptionsDir) {
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go
index aeb1a3a8040..d9d8cfb3e54 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go
@@ -1,3 +1,4 @@
+//go:build !linux && !windows
 // +build !linux,!windows
 
 package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go
index 2b664c7f8dd..0adf5835855 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go
index 1d89dd55030..94160ad5790 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go
index 1fc4e6d19d8..859791e369b 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go
index e3c851fe617..c9e4184aa9f 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go
@@ -1,4 +1,5 @@
-// +build windows, osx
+//go:build (windows && ignore) || osx
+// +build windows,ignore osx
 
 package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go 
b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go index 7463cdd8f03..af1c77d6050 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris && cgo // +build solaris,cgo package sysinfo @@ -45,7 +46,7 @@ func IsCPUSharesAvailable() bool { // New returns a new SysInfo, using the filesystem to detect which features // the kernel supports. -//NOTE Solaris: If we change the below capabilities be sure +// NOTE Solaris: If we change the below capabilities be sure // to update verifyPlatformContainerSettings() in daemon_solaris.go func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} @@ -63,7 +64,6 @@ func New(quiet bool) *SysInfo { // setCgroupMem reads the memory information for Solaris. func setCgroupMem(quiet bool) cgroupMemInfo { - return cgroupMemInfo{ MemoryLimit: true, SwapLimit: true, @@ -76,7 +76,6 @@ func setCgroupMem(quiet bool) cgroupMemInfo { // setCgroupCPU reads the cpu information for Solaris. func setCgroupCPU(quiet bool) cgroupCPUInfo { - return cgroupCPUInfo{ CPUShares: true, CPUCfsPeriod: false, @@ -88,7 +87,6 @@ func setCgroupCPU(quiet bool) cgroupCPUInfo { // blkio switches are not supported in Solaris. func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { - return cgroupBlkioInfo{ BlkioWeight: false, BlkioWeightDevice: false, @@ -97,7 +95,6 @@ func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { // setCgroupCPUsetInfo reads the cpuset information for Solaris. func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { - return cgroupCpusetInfo{ Cpuset: true, Cpus: getCPUCount(), diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go index 45f3ef1c65f..4aa9401f6c1 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go @@ -1,3 +1,4 @@ +//go:build !linux && !solaris && !windows // +build !linux,!solaris,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go index 4e6255bc592..455a8892f21 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/timetype/timestamp.go b/vendor/github.com/containers/common/pkg/timetype/timestamp.go index ce2cb64f28b..3cbfe40980b 100644 --- a/vendor/github.com/containers/common/pkg/timetype/timestamp.go +++ b/vendor/github.com/containers/common/pkg/timetype/timestamp.go @@ -34,13 +34,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - if strings.Contains(value, ".") { // nolint:gocritic + switch { + case strings.Contains(value, "."): if parseInLocation { format = rFC3339NanoLocal } else { format = time.RFC3339Nano } - } else if strings.Contains(value, "T") { + case strings.Contains(value, "T"): // we want the number of colons in the T portion of the timestamp tcolons := strings.Count(value, ":") // if parseInLocation is off and we have a +/- zone offset (not Z) then @@ -68,9 +69,9 @@ func 
GetTimestamp(value string, reference time.Time) (string, error) { format = time.RFC3339 } } - } else if parseInLocation { + case parseInLocation: format = dateLocal - } else { + default: format = dateWithZone } @@ -112,7 +113,7 @@ func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) return parseTimestamp(value) } -func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go index bb589f7ac04..4f5527cb63b 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unix.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package umask @@ -9,8 +10,8 @@ import ( ) func Check() { - oldUmask := syscall.Umask(0022) //nolint - if (oldUmask & ^0022) != 0 { + oldUmask := syscall.Umask(0o022) //nolint + if (oldUmask & ^0o022) != 0 { logrus.Debugf("umask value too restrictive. Forcing it to 022") } } diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go index 9041d5f2098..cf76ea1d37f 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package umask diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go new file mode 100644 index 00000000000..98890a686f8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -0,0 +1,24 @@ +package util + +import "regexp" + +// StringInSlice determines if a string is in a string slice, returns bool +func StringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool +func StringMatchRegexSlice(s string, re []string) bool { + for _, r := range re { + m, err := regexp.MatchString(r, s) + if err == nil && m { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go similarity index 75% rename from vendor/github.com/containers/common/pkg/config/util_supported.go rename to vendor/github.com/containers/common/pkg/util/util_supported.go index 33e4a9e8fc2..35201f93237 100644 --- a/vendor/github.com/containers/common/pkg/config/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -1,6 +1,7 @@ -// +build linux darwin +//go:build linux || darwin || freebsd +// +build linux darwin freebsd -package config +package util import ( "fmt" @@ -19,8 +20,14 @@ var ( rootlessRuntimeDir string ) -// getRuntimeDir returns the runtime directory -func getRuntimeDir() (string, error) { +// isWriteableOnlyByOwner checks that the specified permission mask allows write +// access only to the owner. 
+func isWriteableOnlyByOwner(perm os.FileMode) bool { + return (perm & 0o722) == 0o700 +} + +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { var rootlessRuntimeDirError error rootlessRuntimeDirOnce.Do(func() { @@ -39,21 +46,21 @@ func getRuntimeDir() (string, error) { uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) if runtimeDir == "" { tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { logrus.Debugf("unable to make temp dir: %v", err) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { runtimeDir = tmpDir } } if runtimeDir == "" { tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { logrus.Debugf("unable to make temp dir %v", err) } st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 { + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { runtimeDir = tmpDir } } diff --git a/vendor/github.com/containers/common/pkg/config/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go similarity index 71% rename from vendor/github.com/containers/common/pkg/config/util_windows.go rename to vendor/github.com/containers/common/pkg/util/util_windows.go index 995301f5df0..1cffb21fc31 100644 --- a/vendor/github.com/containers/common/pkg/config/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -1,12 +1,13 @@ +//go:build windows // +build windows -package config +package util import ( "github.com/pkg/errors" ) -// getRuntimeDir returns the runtime directory -func getRuntimeDir() (string, error) { +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { return "", errors.New("this function is not implemented for windows") }
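For context, the rename makes the rootless runtime-dir lookup public. The probing order visible in this hunk can be sketched as below; the real code derives the uid from unshare.GetRootlessUID() and only trusts a candidate after checking ownership and the permission mask via isWriteableOnlyByOwner, and the XDG_RUNTIME_DIR step is an assumption based on surrounding context not shown here:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// runtimeDirCandidates lists the fallbacks in the order the hunk above
// probes them; illustrative only.
func runtimeDirCandidates(uid int) []string {
	candidates := []string{}
	if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
		candidates = append(candidates, xdg) // assumed first choice
	}
	return append(candidates,
		filepath.Join("/run", "user", fmt.Sprintf("%d", uid)),                  // systemd-managed per-user dir
		filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%d", uid)),         // last-resort temp dir
	)
}

func main() {
	fmt.Println(runtimeDirCandidates(os.Geteuid()))
}

diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index b6ceabce531..61fce9d2298 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build.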
-const Version = "0.46.1-dev" +const Version = "0.48.0" diff --git a/vendor/github.com/containers/conmon-rs/pkg/client/attach.go b/vendor/github.com/containers/conmon-rs/pkg/client/attach.go index 7003f325f21..ce432aa5d12 100644 --- a/vendor/github.com/containers/conmon-rs/pkg/client/attach.go +++ b/vendor/github.com/containers/conmon-rs/pkg/client/attach.go @@ -8,9 +8,9 @@ import ( "net" "github.com/containers/conmon-rs/internal/proto" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/kubeutils" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/kubeutils" + "github.com/containers/podman/v4/utils" ) const ( diff --git a/vendor/github.com/containers/conmon-rs/pkg/client/client.go b/vendor/github.com/containers/conmon-rs/pkg/client/client.go index a1ec4e242e5..0aeb114d8ec 100644 --- a/vendor/github.com/containers/conmon-rs/pkg/client/client.go +++ b/vendor/github.com/containers/conmon-rs/pkg/client/client.go @@ -20,9 +20,10 @@ import ( ) const ( - binaryName = "conmonrs" - socketName = "conmon.sock" - pidFileName = "pidfile" + binaryName = "conmonrs" + socketName = "conmon.sock" + pidFileName = "pidfile" + defaultTimeout = 10 * time.Second ) var ( @@ -101,7 +102,9 @@ func New(config *ConmonServerConfig) (client *ConmonClient, retErr error) { return nil, fmt.Errorf("convert config to client: %w", err) } // Check if the process has already started, and inherit that process instead. - if resp, err := cl.Version(context.Background()); err == nil { + ctx, cancel := defaultContext() + defer cancel() + if resp, err := cl.Version(ctx); err == nil { cl.serverPID = resp.ProcessID return cl, nil @@ -182,7 +185,6 @@ func (c *ConmonClient) startServer(config *ConmonServerConfig) error { } func (c *ConmonClient) toArgs(config *ConmonServerConfig) (entrypoint string, args []string, err error) { - const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) if c == nil { return "", args, nil } @@ -270,16 +272,26 @@ func pidGivenFile(file string) (uint32, error) { func (c *ConmonClient) waitUntilServerUp() (err error) { for i := 0; i < 100; i++ { - _, err = c.Version(context.Background()) + ctx, cancel := defaultContext() + + _, err = c.Version(ctx) if err == nil { + cancel() + break } + + cancel() time.Sleep(1 * time.Millisecond) } return err } +func defaultContext() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), defaultTimeout) +} + func (c *ConmonClient) newRPCConn() (*rpc.Conn, error) { socketConn, err := DialLongSocket("unix", c.socket()) if err != nil { @@ -471,6 +483,6 @@ func (c *ConmonClient) CreateContainer( if err := stringSliceToTextList(cfg.OOMExitPaths, req.NewOomExitPaths); err != nil { return fmt.Errorf("convert oom exit paths string slice to text list: %w", err) } if err := c.initLogDrivers(&req, cfg.LogDrivers); err != nil { return fmt.Errorf("init log drivers: %w", err) diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index e1649ba8e18..d28cc4a3ffd 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "reflect" "strings" @@ -15,8 +14,10 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagesource" "github.com/containers/image/v5/internal/pkg/platform" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -31,7 +32,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v7" - "github.com/vbauerster/mpb/v7/decor" "golang.org/x/sync/semaphore" "golang.org/x/term" ) @@ -63,8 +63,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource + dest private.ImageDestination + rawSource private.ImageSource reportWriter io.Writer progressOutput io.Writer progressInterval time.Duration @@ -80,13 +80,13 @@ type copier struct { // imageCopier tracks state specific to a single image (possibly an item of a manifest list) type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool - canSubstituteBlobs bool - ociEncryptLayers *[]int + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + ociEncryptLayers *[]int } const ( @@ -122,17 +122,23 @@ type ImageListSelection int // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. - SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`. + SignIdentity reference.Named // Identity to use when signing, defaults to the docker reference of the destination ReportWriter io.Writer SourceCtx *types.SystemContext DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + + // Preserve digests, and fail if we cannot. + PreserveDigests bool // manifest MIME type of image set by user. "" is default and means use autodetection for the manifest MIME type ForceManifestMIMEType string ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. // The encryption options are derived from the construction of the EncryptConfig object. // Note: During initial encryption process of a layer, the resultant digest is not known @@ -191,26 +197,28 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, return nil, err } - reportWriter := ioutil.Discard + reportWriter := io.Discard if options.ReportWriter != nil { reportWriter = options.ReportWriter } - dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) if err != nil { return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef)) } + dest := imagedestination.FromPublic(publicDest) defer func() { if err := dest.Close(); err != nil { retErr = errors.Wrapf(retErr, " (dest: %v)", err) } }() - rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) if err != nil { return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef)) } + rawSource := imagesource.FromPublic(publicRawSource) defer func() { if err := rawSource.Close(); err != nil { retErr = errors.Wrapf(retErr, " (src: %v)", err) @@ -222,7 +230,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, // createProgressBar() will print a single line instead. progressOutput := reportWriter if !isTTY(reportWriter) { - progressOutput = ioutil.Discard + progressOutput = io.Discard } c := &copier{ @@ -410,7 +418,36 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) } } - canModifyManifestList := (len(sigs) == 0) + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyOneImage.
+ cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } // Determine if we'll need to convert the manifest list to a different format. forceListMIMEType := options.ForceManifestMIMEType @@ -425,8 +462,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "determining manifest list type to write to destination") } if selectedListType != originalList.MIMEType() { - if !canModifyManifestList { - return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) } } @@ -510,8 +547,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // If we can't just use the original value, but we have to change it, flag an error. if !bytes.Equal(attemptedManifestList, originalManifestList) { - if !canModifyManifestList { - return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) } logrus.Debugf("Manifest list has been updated") } else { @@ -536,7 +573,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Sign the manifest list. if options.SignBy != "" { - newSig, err := c.createSignature(manifestList, options.SignBy) + newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity) if err != nil { return nil, err } @@ -629,13 +666,27 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } } + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. + cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + ic := imageCopier{ c: c, manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, - ociEncryptLayers: options.OciEncryptLayers, + cannotModifyManifestReason: cannotModifyManifestReason, + ociEncryptLayers: options.OciEncryptLayers, } // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. 
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: @@ -643,7 +694,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" if err := ic.updateEmbeddedDockerReference(); err != nil { return nil, "", "", err @@ -660,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - // If encrypted and decryption keys provided, we should try to decrypt - ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal if options.OptimizeDestinationImageAlreadyExists { @@ -710,10 +759,10 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible, image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason) } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. @@ -744,7 +793,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy) + newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity) if err != nil { return nil, "", "", err } @@ -813,9 +862,9 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil // No reference embedded in the manifest, or it matches destRef already. 
} - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which is not possible (image is signed or the destination specifies a digest)", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) } ic.manifestUpdates.EmbeddedDockerReference = destRef return nil @@ -833,7 +882,7 @@ func isTTY(w io.Writer) bool { return false } -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos := ic.src.LayerInfos() numLayers := len(srcInfos) @@ -844,8 +893,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfosUpdated := false // If we only need to check authorization, no updates required. if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Copying this image requires changing layer representation, which is not possible (image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) } srcInfos = updatedSrcInfos srcInfosUpdated = true @@ -975,8 +1024,8 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { pendingImage := ic.src if !ic.noPendingManifestUpdates() { - if !ic.canModifyManifest { - return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + if ic.cannotModifyManifestReason != "" { + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) } if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. @@ -1011,96 +1060,12 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrapf(err, "writing manifest %q", string(man)) + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", errors.Wrapf(err, "writing manifest") } return man, manifestDigest, nil } -// newProgressPool creates a *mpb.Progress. -// The caller must eventually call pool.Wait() after the pool will no longer be updated. -// NOTE: Every progress bar created within the progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. 
-func (c *copier) newProgressPool() *mpb.Progress { - return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) -} - -// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar -func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator { - producer := func(filler interface{}) decor.DecorFunc { - return func(s decor.Statistics) string { - if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) - } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" - percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) - } - } - return decor.Any(producer(filler), wcc...) -} - -// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter -// is ioutil.Discard, the progress bar's output will be discarded -// NOTE: Every progress bar created within a progress pool must either successfully -// complete or be aborted, or pool.Wait() will hang. That is typically done -// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. -func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { - // shortDigestLen is the length of the digest used for blobs. - const shortDigestLen = 12 - - prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) - // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column. - maxPrefixLen := len("Copying blob ") + shortDigestLen - if len(prefix) > maxPrefixLen { - prefix = prefix[:maxPrefixLen] - } - - // onComplete will replace prefix once the bar/spinner has completed - onComplete = prefix + " " + onComplete - - // Use a normal progress bar when we know the size (i.e., size > 0). - // Otherwise, use a spinner to indicate that something's happening. - var bar *mpb.Bar - sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft() - if info.Size > 0 { - if partial { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - customPartialBlobCounter(sstyle.Build()), - ), - ) - } else { - bar = pool.AddBar(info.Size, - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), - ), - ) - } - } else { - bar = pool.Add(0, - sstyle.Build(), - mpb.BarFillerClearOnComplete(), - mpb.PrependDecorators( - decor.OnComplete(decor.Name(prefix), onComplete), - ), - ) - } - if c.progressOutput == ioutil.Discard { - c.Printf("Copying %s %s\n", kind, info.Digest) - } - return bar -} - // copyConfig copies config.json, if any, from src to dest. 
func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() @@ -1111,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error { } defer c.concurrentBlobCopiesSemaphore.Release(1) - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) - } - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer progressPool := c.newProgressPool() defer progressPool.Wait() bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") defer bar.Abort(false) + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) + } + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err } - bar.SetTotal(int64(len(configBlob)), true) + + bar.mark100PercentComplete() return destInfo, nil }() if err != nil { @@ -1171,48 +1137,37 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - // Diffs are needed if we are encrypting an image or trying to decrypt an image - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) - - // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source. - if !diffIDIsNeeded { + diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + // When encrypting or decrypting, only use the simple code path. We might be able to optimize more + // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), + // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not. + encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil) + canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting + + // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable. + if canAvoidProcessingCompleteLayer { // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. - var ( - blobInfo types.BlobInfo - reused bool - err error - ) - // Note: the storage destination optimizes the committing of - layers which requires passing the index of the layer. - Hence, we need to special case and cast.
- dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: ic.canSubstituteBlobs, - SrcRef: srcRef, - EmptyLayer: emptyLayer, - } - options.LayerIndex = &layerIndex - reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options) - } else { - reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) - } - + reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: ic.canSubstituteBlobs, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + }) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest) } if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists") defer bar.Abort(false) - bar.SetTotal(0, true) + bar.mark100PercentComplete() }() // Throw an event that the layer has been skipped @@ -1243,9 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // of the source file are not known yet and must be fetched. // Attempt a partial only when the source allows to retrieve a blob partially and // the destination has support for it. - imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable) - imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial) - if okSource && okDest && !diffIDIsNeeded { + if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() { if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") hideProgressBar := true @@ -1253,33 +1206,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to bar.Abort(hideProgressBar) }() - progress := make(chan int64) - terminate := make(chan interface{}) - - defer close(terminate) - defer close(progress) - - proxy := imageSourceSeekableProxy{ - source: imgSource, - progress: progress, + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, } - go func() { - for { - select { - case written := <-progress: - bar.IncrInt64(written) - case <-terminate: - return - } - } - }() - - bar.SetTotal(srcInfo.Size, false) - info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) + info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { - bar.SetRefill(srcInfo.Size - bar.Current()) - bar.SetCurrent(srcInfo.Size) - bar.SetTotal(srcInfo.Size, true) + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() hideProgressBar = false logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) return true, info @@ -1292,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } // Fallback: copy the layer, computing the diffID if we need to do so - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) - } - defer 
srcStream.Close() - return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") defer bar.Abort(false) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest) + } + defer srcStream.Close() + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) if err != nil { return types.BlobInfo{}, "", err @@ -1317,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } diffID = diffIDResult.digest } } - bar.SetTotal(srcInfo.Size, true) + bar.mark100PercentComplete() return blobInfo, diffID, nil }() } @@ -1334,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. 
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -1359,7 +1303,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1411,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { // and returns a complete blobInfo of the copied blob. func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer, - canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { + canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) { if isConfig { // This is guaranteed by the caller, but set it here to be explicit. canModifyBlob = false } @@ -1431,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } var destStream io.Reader = digestingReader + // === Update progress bars + destStream = bar.ProxyReader(destStream) + // === Decrypt the stream, if required. var decrypted bool if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil { @@ -1465,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name()) } - // === Update progress bars - destStream = bar.ProxyReader(destStream) - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. if getOriginalLayerCopyWriter != nil { @@ -1615,24 +1559,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Finally, send the layer stream to dest. - var uploadedInfo types.BlobInfo - // Note: the storage destination optimizes the committing of layers - // which requires passing the index of the layer. Hence, we need to - // special case and cast. 
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.PutBlobOptions{ - Cache: c.blobInfoCache, - IsConfig: isConfig, - EmptyLayer: emptyLayer, - } - if !isConfig { - options.LayerIndex = &layerIndex - } - uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) - } else { - uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig) + options := private.PutBlobOptions{ + Cache: c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, } + if !isConfig { + options.LayerIndex = &layerIndex + } + uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "writing blob") } @@ -1664,7 +1599,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // sent there if we are not already at EOF. if getOriginalLayerCopyWriter != nil { logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) + _, err := io.Copy(io.Discard, originalLayerReader) if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest) } @@ -1677,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) } if digestingReader.validationSucceeded { - // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: - // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader - // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob - // (because inputInfo.Digest == "", this must have been computed afresh). - switch compressionOperation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: - c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: - c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) - default: - return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encrypted && !decrypted { + // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: + // srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader + // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob + // (because inputInfo.Digest == "", this must have been computed afresh). + switch compressionOperation { + case types.PreserveOriginal: + break // Do nothing, we have only one digest and we might not have even verified it. 
+ case types.Compress: + c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + case types.Decompress: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + default: + return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) + } } if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression { c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName) diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index b97edbf08b7..86ec8863aea 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -79,10 +79,10 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller + if ic.cannotModifyManifestReason != "" { + // We could also drop this check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // messages. But it is nice to hide the “if we can't modify, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go new file mode 100644 index 00000000000..585d860570d --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -0,0 +1,148 @@ +package copy + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v7" + "github.com/vbauerster/mpb/v7/decor" +) + +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. +func (c *copier) newProgressPool() *mpb.Progress { + return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) +} + +// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar +func customPartialBlobDecorFunc(s decor.Statistics) string { + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) + } + pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) +} + +// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods. +type progressBar struct { + *mpb.Bar + originalSize int64 // or -1 if unknown +} + +// createProgressBar creates a progressBar in pool. 
Note that if the copier's reportWriter +// is io.Discard, the progress bar's output will be discarded + +// NOTE: Every progress bar created within a progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. + +// As a convention, most users of progress bars should call mark100PercentComplete on full success; +// by convention, we don't leave progress bars in partial state when fully done +// (even if we copied much less data than anticipated). +func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar { + // shortDigestLen is the length of the digest used for blobs. + const shortDigestLen = 12 + + prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded()) + // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column. + maxPrefixLen := len("Copying blob ") + shortDigestLen + if len(prefix) > maxPrefixLen { + prefix = prefix[:maxPrefixLen] + } + + // onComplete will replace prefix once the bar/spinner has completed + onComplete = prefix + " " + onComplete + + // Use a normal progress bar when we know the size (i.e., size > 0). + // Otherwise, use a spinner to indicate that something's happening. + var bar *mpb.Bar + if info.Size > 0 { + if partial { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.Any(customPartialBlobDecorFunc), + ), + ) + } else { + bar = pool.AddBar(info.Size, + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + ), + ) + } + } else { + bar = pool.New(0, + mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), + mpb.BarFillerClearOnComplete(), + mpb.PrependDecorators( + decor.OnComplete(decor.Name(prefix), onComplete), + ), + ) + } + if c.progressOutput == io.Discard { + c.Printf("Copying %s %s\n", kind, info.Digest) + } + return &progressBar{ + Bar: bar, + originalSize: info.Size, + } +} + +// mark100PercentComplete marks the progress bar as 100% complete; +// it may do so by advancing the current state if it is below the known total. +func (bar *progressBar) mark100PercentComplete() { + if bar.originalSize > 0 { + // We can't call bar.SetTotal even if we wanted to; the total cannot be changed + // after a progress bar is created with a definite total. + bar.SetCurrent(bar.originalSize) // This triggers the completion condition. + } else { + // -1 = unknown size + // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known + // size (possible at least in theory), in mpb, zero-sized progress bars are treated + // as unknown size, in particular they are not configured to be marked as + // complete on bar.Current() reaching bar.total (because that would happen already + // when creating the progress bar). + // That means that we are both _allowed_ to call SetTotal, and we _have to_. + bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete. + } +}
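The NOTE above is load-bearing: pool.Wait() blocks until every bar in the pool has completed or aborted. A self-contained sketch of that lifecycle against mpb/v7 directly, with an illustrative digest prefix; SetCurrent plays the role mark100PercentComplete plays for known-size bars:

package main

import (
	"os"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(os.Stdout))
	bar := pool.AddBar(100, // illustrative blob size
		mpb.BarFillerClearOnComplete(),
		mpb.PrependDecorators(
			decor.OnComplete(decor.Name("Copying blob 0123456789ab"), "Copying blob 0123456789ab done"),
		),
	)
	defer pool.Wait()      // runs last; would hang if any bar were left unfinished
	defer bar.Abort(false) // no-op once the bar has completed

	bar.SetCurrent(100) // the known-size analogue of mark100PercentComplete
}

+ +// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar +// with the number of received bytes.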
+type blobChunkAccessorProxy struct { + wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor + bar *progressBar // A progress bar updated with the number of bytes read so far +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be non-overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) + if err == nil { + total := int64(0) + for _, c := range chunks { + total += int64(c.Length) + } + s.bar.IncrInt64(total) + } + return rc, errs, err +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_channel.go similarity index 68% rename from vendor/github.com/containers/image/v5/copy/progress_reader.go rename to vendor/github.com/containers/image/v5/copy/progress_channel.go index 42f490d326a..d5e9e09bda9 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_reader.go +++ b/vendor/github.com/containers/image/v5/copy/progress_channel.go @@ -1,15 +1,13 @@ package copy import ( - "context" "io" "time" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/types" ) -// progressReader is a reader that reports its progress on an interval. +// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval. type progressReader struct { source io.Reader channel chan<- types.ProgressProperties @@ -79,26 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) { } return n, err } - -// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes -// are received. -type imageSourceSeekableProxy struct { - // source is the seekable input to read from. - source internalTypes.ImageSourceSeekable - // progress is the chan where the total number of bytes read so far are reported. - progress chan int64 -} - -// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received -// to the progress chan. -func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks) - if err == nil { - total := int64(0) - for _, c := range chunks { - total += int64(c.Length) - } - s.progress <- total - } - return rc, errs, err -} diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 61612a4d3d7..aa42674bcdc 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -1,13 +1,14 @@ package copy import ( + "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" "github.com/pkg/errors" ) // createSignature creates a new signature of manifest using keyIdentity.
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { +func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "initializing GPG") @@ -17,13 +18,19 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e return nil, errors.Wrap(err, "Signing not supported") } - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + if identity != nil { + if reference.IsNameOnly(identity) { + return nil, errors.Errorf("Sign identity must be a fully specified reference %s", identity) + } + } else { + identity = c.dest.Reference().DockerReference() + if identity == nil { + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) + } } c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) if err != nil { return nil, errors.Wrap(err, "creating signature") } diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index ea20e7c5e41..3b135e68e50 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -3,7 +3,6 @@ package directory import ( "context" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath()) } if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) + contents, err := os.ReadFile(d.ref.versionPath()) if err != nil { return nil, err } @@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag } } // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) + err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644) if err != nil { return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath()) } @@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") + blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. 
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error { - return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) + return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644) } // PutSignatures writes a set of signatures to the destination. @@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { + if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil { return err } } @@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) { // returns true if directory is empty func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return false, err } @@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) { // deletes the contents of a directory func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return err } diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go index ad9129d4012..8b509112aa8 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -3,7 +3,6 @@ package directory import ( "context" "io" - "io/ioutil" "os" "github.com/containers/image/v5/manifest" @@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error { // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest)) + m, err := os.ReadFile(s.ref.manifestPath(instanceDigest)) if err != nil { return nil, "", err } @@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { signatures := [][]byte{} for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest)) + signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest)) if err != nil { if os.IsNotExist(err) { break diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 3fe9a11d003..f537de72da3 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,13 +1,11 @@ package docker import ( - "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -463,7 +461,11 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea return nil, err } - url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) + url, err := url.Parse(urlString) + if err != nil { + return nil, err + } return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) } @@ -500,7 +502,7 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat // makeRequest should generally be preferred. // In case of an HTTP 429 status code in the response, it may automatically retry a few times. // TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { +func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { delay := backoffInitialDelay attempts := 0 for { @@ -518,7 +520,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url if delay > backoffMaxDelay { delay = backoffMaxDelay } - logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds()) + logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds()) select { case <-ctx.Done(): return nil, ctx.Err() @@ -533,12 +535,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // Note that no exponential back off is performed when receiving an http 429 status code. 
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, url, stream) +func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, method, url.String(), stream) if err != nil { return nil, err } - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. + if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it. req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") @@ -553,7 +555,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, return nil, err } } - logrus.Debugf("%s %s", method, url) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.client.Do(req) if err != nil { return nil, err @@ -650,10 +652,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall params.Add("refresh_token", c.auth.IdentityToken) params.Add("client_id", "containers/image") - authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode())) + authReq.Body = io.NopCloser(strings.NewReader(params.Encode())) authReq.Header.Add("User-Agent", c.userAgent) authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err @@ -705,13 +707,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, } authReq.Header.Add("User-Agent", c.userAgent) - logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) + logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() - if err := httpResponseToError(res, "Requesting bear token"); err != nil { + if err := httpResponseToError(res, "Requesting bearer token"); err != nil { return nil, err } tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) @@ -735,14 +737,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { c.client = &http.Client{Transport: tr} ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) + if err != nil { + return err + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return err } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return httpResponseToError(resp, "") } @@ -762,14 +767,17 @@ 
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) + url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)) + if err != nil { + return false + } resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) + logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err) return false } defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) + logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 56c39c141d7..d02100cf80b 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -7,7 +7,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -182,7 +181,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL // returns, so there isn’t a way for the error text to be provided to any of our callers. defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) if err != nil { logrus.Debugf("Error uploading layer chunked %v", err) return nil, err @@ -207,7 +206,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, locationQuery := uploadLocation.Query() locationQuery.Set("digest", blobDigest.String()) uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) + res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { return types.BlobInfo{}, err } @@ -257,9 +256,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc "from": {reference.Path(srcRepo)}, }.Encode(), } - mountPath := u.String() - logrus.Debugf("Trying to mount %s", mountPath) - res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope) + logrus.Debugf("Trying to mount %s", u.Redacted()) + res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope) if err != nil { return err } @@ -276,8 +274,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc if err != nil { return errors.Wrap(err, "determining upload URL after a mount attempt") } - logrus.Debugf("... 
started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) - res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope) + logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted()) + res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope) if err != nil { logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) } else { @@ -464,6 +462,18 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst } return err } + // An HTTP server may not be a registry at all, and just return 200 OK to everything + // (in particular that can fairly easily happen after tearing down a website and + // replacing it with a global 302 redirect to a new website, completely ignoring the + // path in the request); in that case we could “succeed” uploading a whole image. + // With docker/distribution we could rely on a Docker-Content-Digest header being present + // (because docker/distribution/registry/client has been failing uploads if it was missing), + // but that has been defined as explicitly optional by + // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers + // So, just note the missing header in a debug log. + if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 { + logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry") + } return nil } @@ -581,16 +591,16 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) if err != nil { return err } - err = ioutil.WriteFile(url.Path, signature, 0644) + err = os.WriteFile(url.Path, signature, 0644) if err != nil { return err } return nil case "http", "https": - return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) + return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted()) default: - return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) + return errors.Errorf("Unsupported scheme when writing signature to %s", url.Redacted()) } } @@ -608,9 +618,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error return false, err case "http", "https": - return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. 
Configure a sigstore-staging: location", url.Scheme, url.Redacted()) default: - return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) + return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted()) } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 314e9b394ee..085a3afcc5c 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -16,7 +15,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" @@ -147,6 +146,11 @@ func (s *dockerImageSource) Close() error { return nil } +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (s *dockerImageSource) SupportsGetBlobAt() bool { + return true +} + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() // to read the image's layers. @@ -248,13 +252,14 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } for _, u := range urls { - if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") { + url, err := url.Parse(u) + if err != nil || (url.Scheme != "http" && url.Scheme != "https") { continue // unsupported url. skip this url. } // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. 
- resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil) + resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil) if err == nil { if resp.StatusCode != http.StatusOK { err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) @@ -288,7 +293,7 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool { } // splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks -func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) { +func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { defer close(streams) defer close(errs) currentOffset := uint64(0) @@ -302,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, break } toSkip := c.Offset - currentOffset - if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil { + if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { errs <- err break } @@ -310,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } s := signalCloseReader{ closed: make(chan interface{}), - stream: ioutil.NopCloser(io.LimitReader(body, int64(c.Length))), + stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), consumeStream: true, } streams <- s @@ -322,7 +327,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } // handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan. -func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) { +func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) { defer close(streams) defer close(errs) if !strings.HasPrefix(mediaType, "multipart/") { @@ -357,9 +362,12 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } } -// GetBlobAt returns a stream for the specified blob. +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. // The specified chunks must be not overlapping and sorted by their offset. -func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. 
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) var rangeVals []string @@ -401,7 +409,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, return streams, errs, nil case http.StatusBadRequest: res.Body.Close() - return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status} + return nil, nil, private.BadPartialRequestError{Status: res.Status} default: err := httpResponseToError(res, "Error fetching partial blob") if err == nil { @@ -506,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( switch url.Scheme { case "file": logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) + sig, err := os.ReadFile(url.Path) if err != nil { if os.IsNotExist(err) { return nil, true, nil @@ -516,7 +524,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( return sig, false, nil case "http", "https": - logrus.Debugf("GET %s", url) + logrus.Debugf("GET %s", url.Redacted()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) if err != nil { return nil, false, err @@ -529,7 +537,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( if res.StatusCode == http.StatusNotFound { return nil, true, nil } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) + return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode)) } sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) if err != nil { @@ -538,7 +546,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( return sig, false, nil default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) + return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.Redacted()) } } @@ -602,8 +610,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) } - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) + manifestDigest, err := manifest.Digest(manifestBody) + if err != nil { + return fmt.Errorf("computing manifest digest: %w", err) + } + deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) // When retrieving the digest from a registry >= 2.3 use the following header: // "Accept": "application/vnd.docker.distribution.manifest.v2+json" @@ -621,11 +632,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) } - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err - } - for i := 0; ; i++ { url := signatureStorageURL(c.signatureBase, manifestDigest, i) missing, err := c.deleteOneSignature(url) @@ -756,7 +762,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) { func (s signalCloseReader) Close() error { defer close(s.closed) if s.consumeStream { - if _, err := io.Copy(ioutil.Discard, s.stream); err != nil { + if _, err := 
io.Copy(io.Discard, s.stream); err != nil { s.stream.Close() return err } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 6164ceb66ed..c77c002d15e 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -4,7 +4,6 @@ import ( "archive/tar" "encoding/json" "io" - "io/ioutil" "os" "path" @@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // The caller should call .Close() on the returned archive when done. func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { // Save inputStream to a temporary file - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") if err != nil { return nil, errors.Wrap(err, "creating temporary file") } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go index b8d84d2452a..8e9be17c189 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -6,7 +6,6 @@ import ( "context" "encoding/json" "io" - "io/ioutil" "os" "path" "sync" @@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif uncompressedSize := h.Size if isCompressed { - uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) if err != nil { return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) } @@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B } if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. - return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, diff --git a/vendor/github.com/containers/image/v5/docker/lookaside.go b/vendor/github.com/containers/image/v5/docker/lookaside.go index 515e59327d3..d0a3f1be069 100644 --- a/vendor/github.com/containers/image/v5/docker/lookaside.go +++ b/vendor/github.com/containers/image/v5/docker/lookaside.go @@ -2,7 +2,6 @@ package docker import ( "fmt" - "io/ioutil" "net/url" "os" "path" @@ -82,7 +81,7 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, } else { // returns default directory if no sigstore specified in configuration file url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String()) + logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted()) } // NOTE: Keep this in sync with docs/signature-protocols.md! // FIXME? Restrict to explicitly supported schemes? 
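The recurring replacement of url.String() with url.Redacted() in the log and error messages above (and throughout the docker transport in this diff) is a credential-hygiene fix: net/url's Redacted, available since Go 1.15, prints any password in the URL's userinfo as "xxxxx" instead of verbatim. A minimal standalone sketch of the difference; the host and credentials below are made up for illustration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A resolved registry URL carrying credentials, of the kind the docker
	// transport logs when pinging a registry or cancelling a blob upload.
	u, err := url.Parse("https://user:hunter2@registry.example.com/v2/foo/blobs/uploads/123")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())   // https://user:hunter2@registry.example.com/v2/foo/blobs/uploads/123
	fmt.Println(u.Redacted()) // https://user:xxxxx@registry.example.com/v2/foo/blobs/uploads/123
}

Passing a parsed *url.URL into makeRequestToResolvedURL (rather than a string, per the signature change earlier in this diff) lets every caller log the redacted form without re-parsing.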
@@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { continue } configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) + configBytes, err := os.ReadFile(configPath) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go new file mode 100644 index 00000000000..7b15871f7b6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go @@ -0,0 +1,6 @@ +package reference + +// Return true if the specified string fully matches `IdentifierRegexp`. +func IsFullIdentifier(s string) bool { + return anchoredIdentifierRegexp.MatchString(s) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go new file mode 100644 index 00000000000..82734a6cdc3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -0,0 +1,69 @@ +package imagedestination + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// FromPublic(dest) returns an object that provides the private.ImageDestination API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageDestination, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageDestination. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementors of a public interface. +func FromPublic(dest types.ImageDestination) private.ImageDestination { + if dest2, ok := dest.(private.ImageDestination); ok { + return dest2 + } + return &wrapped{ImageDestination: dest} +} + +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + types.ImageDestination +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (w *wrapped) SupportsPutBlobPartial() bool { + return false +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. 
chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { + return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name()) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go new file mode 100644 index 00000000000..fe1be8d9eab --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go @@ -0,0 +1,47 @@ +package imagesource + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// FromPublic(src) returns an object that provides the private.ImageSource API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageSource, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageSource. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementors of a public interface. +func FromPublic(src types.ImageSource) private.ImageSource { + if src2, ok := src.(private.ImageSource); ok { + return src2 + } + return &wrapped{ImageSource: src} +} + +// wrapped provides the private.ImageSource operations +// for a source that only implements types.ImageSource +type wrapped struct { + types.ImageSource +} + +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (w *wrapped) SupportsGetBlobAt() bool { + return false +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. 
+// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name()) +} diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go index 3fed1995cb8..49fa410e91f 100644 --- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go +++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go @@ -2,7 +2,6 @@ package iolimits import ( "io" - "io/ioutil" "github.com/pkg/errors" ) @@ -47,7 +46,7 @@ const ( func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { limitedReader := io.LimitReader(reader, int64(limit+1)) - res, err := ioutil.ReadAll(limitedReader) + res, err := io.ReadAll(limitedReader) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go deleted file mode 100644 index bf6cc87d421..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// Key represents a single key linked to one or more kernel keyrings. -type Key struct { - Name string - - id, ring keyID - size int -} - -// ID returns the 32-bit kernel identifier for a specific key -func (k *Key) ID() int32 { - return int32(k.id) -} - -// Get the key's value as a byte slice -func (k *Key) Get() ([]byte, error) { - var ( - b []byte - err error - sizeRead int - ) - - if k.size == 0 { - k.size = 512 - } - - size := k.size - - b = make([]byte, int(size)) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - k.size = sizeRead - } - } - return b[:k.size], err -} - -// Unlink a key from the keyring it was loaded from (or added to). If the key -// is not linked to any other keyrings, it is destroyed. -func (k *Key) Unlink() error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) - return err -} - -// Describe returns a string describing the attributes of a specified key -func (k *Key) Describe() (string, error) { - keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) - if err != nil { - return "", err - } - return keyAttr, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go deleted file mode 100644 index 5eaad615c7c..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build linux -// +build linux - -// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) -package keyctl - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Keyring is the basic interface to a linux keyctl keyring. -type Keyring interface { - ID - Add(string, []byte) (*Key, error) - Search(string) (*Key, error) -} - -type keyring struct { - id keyID -} - -// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have. -type ID interface { - ID() int32 -} - -// Add a new key to a keyring. The key can be searched for later by name. -func (kr *keyring) Add(name string, key []byte) (*Key, error) { - r, err := unix.AddKey("user", name, key, int(kr.id)) - if err == nil { - key := &Key{Name: name, id: keyID(r), ring: kr.id} - return key, nil - } - return nil, err -} - -// Search for a key by name, this also searches child keyrings linked to this -// one. The key, if found, is linked to the top keyring that Search() was called -// from. -func (kr *keyring) Search(name string) (*Key, error) { - id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0) - if err == nil { - return &Key{Name: name, id: keyID(id), ring: kr.id}, nil - } - return nil, err -} - -// ID returns the 32-bit kernel identifier of a keyring -func (kr *keyring) ID() int32 { - return int32(kr.id) -} - -// SessionKeyring returns the current login session keyring -func SessionKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_SESSION_KEYRING) -} - -// UserKeyring returns the keyring specific to the current user. -func UserKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_USER_KEYRING) -} - -// Unlink an object from a keyring -func Unlink(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// Link a key into a keyring -func Link(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it -func ReadUserKeyring() ([]*Key, error) { - var ( - b []byte - err error - sizeRead int - ) - krSize := 4 - size := krSize - b = make([]byte, size) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - krSize = sizeRead - } - } - keyIDs := getKeyIDsFromByte(b[:krSize]) - return keyIDs, err -} - -func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { - idSize := 4 - var keys []*Key - for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { - tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) - keys = append(keys, &Key{id: keyID(tempID)}) - } - return keys -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go deleted file mode 100644 index 5f4d2157ae9..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// KeyPerm represents in-kernel access control permission to keys and keyrings -// as a 32-bit integer broken up into four permission sets, one per byte. -// In MSB order, the perms are: Processor, User, Group, Other. -type KeyPerm uint32 - -const ( - // PermOtherAll sets all permission for Other - PermOtherAll KeyPerm = 0x3f << (8 * iota) - // PermGroupAll sets all permission for Group - PermGroupAll - // PermUserAll sets all permission for User - PermUserAll - // PermProcessAll sets all permission for Processor - PermProcessAll -) - -// SetPerm sets the permissions on a key or keyring. -func SetPerm(k ID, p KeyPerm) error { - err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) - return err -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go deleted file mode 100644 index f61666e42c2..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -type keyID int32 - -func newKeyring(id keyID) (*keyring, error) { - r1, err := unix.KeyctlGetKeyringID(int(id), true) - if err != nil { - return nil, err - } - - if id < 0 { - r1 = int(id) - } - return &keyring{id: keyID(r1)}, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go new file mode 100644 index 00000000000..65788651fb5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -0,0 +1,106 @@ +package private + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// ImageSource is an internal extension to the types.ImageSource interface. +type ImageSource interface { + types.ImageSource + + // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. + SupportsGetBlobAt() bool + // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). + BlobChunkAccessor +} + +// ImageDestination is an internal extension to the types.ImageDestination +// interface. +type ImageDestination interface { + types.ImageDestination + + // SupportsPutBlobPartial returns true if PutBlobPartial is supported. + SupportsPutBlobPartial() bool + + // PutBlobWithOptions writes contents of stream and returns data representing the result. + // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. + // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. + // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available + // to any other readers for download using the supplied digest. + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error) + + // PutBlobPartial attempts to create a blob using the data that is already present + // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. + // It is available only if SupportsPutBlobPartial(). + // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller + // should fall back to PutBlobWithOptions. + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) + + // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination + // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). + // info.Digest must not be empty. + // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may + // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be + // reflected in the manifest that will be written. + // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. + TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error) +} + +// PutBlobOptions are used in PutBlobWithOptions. +type PutBlobOptions struct { + Cache types.BlobInfoCache // Cache to optionally update with the uploaded blob / look up blob infos. + IsConfig bool // True if the blob is a config + + // The following fields are new to internal/private. Users of internal/private MUST fill them in, + // but they also must expect that they will be ignored by types.ImageDestination transports. + + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. +} + +// TryReusingBlobOptions are used in TryReusingBlobWithOptions. +type TryReusingBlobOptions struct { + Cache types.BlobInfoCache // Cache to use and/or update. + // If true, it is allowed to use an equivalent of the desired blob; + // in that case the returned info may not match the input. + CanSubstitute bool + + // The following fields are new to internal/private. Users of internal/private MUST fill them in, + // but they also must expect that they will be ignored by types.ImageDestination transports. + + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. + SrcRef reference.Named // A reference to the source image that contains the input blob. +} + +// ImageSourceChunk is a portion of a blob. +// This API is experimental and can be changed without bumping the major version number. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// BlobChunkAccessor allows fetching discontiguous chunks of a blob. 
+type BlobChunkAccessor interface { + // GetBlobAt returns a sequential channel of readers that contain data for the requested + // blob chunks, and a channel that might get a single error value. + // The specified chunks must be not overlapping and sorted by their offset. + // The readers must be fully consumed, in the order they are returned, before blocking + // to read the next chunk. + GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) +} + +// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request. +type BadPartialRequestError struct { + Status string +} + +func (e BadPartialRequestError) Error() string { + return e.Status +} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go index 306220585b6..84bb656ac71 100644 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -3,7 +3,6 @@ package streamdigest import ( "fmt" "io" - "io/ioutil" "os" "github.com/containers/image/v5/internal/putblobdigest" @@ -16,7 +15,7 @@ import ( // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. // If an error occurs, inputInfo is not modified. func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") if err != nil { return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) } diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go deleted file mode 100644 index 388f8cf3b49..00000000000 --- a/vendor/github.com/containers/image/v5/internal/types/types.go +++ /dev/null @@ -1,91 +0,0 @@ -package types - -import ( - "context" - "io" - - "github.com/containers/image/v5/docker/reference" - publicTypes "github.com/containers/image/v5/types" -) - -// ImageDestinationWithOptions is an internal extension to the ImageDestination -// interface. -type ImageDestinationWithOptions interface { - publicTypes.ImageDestination - - // PutBlobWithOptions is a wrapper around PutBlob. If - // options.LayerIndex is set, the blob will be committed directly. - // Either by the calling goroutine or by another goroutine already - // committing layers. - // - // Please note that TryReusingBlobWithOptions and PutBlobWithOptions - // *must* be used the together. Mixing the two with non "WithOptions" - // functions is not supported. - PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error) - - // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If - // options.LayerIndex is set, the reused blob will be recoreded as - // already pulled. - // - // Please note that TryReusingBlobWithOptions and PutBlobWithOptions - // *must* be used the together. Mixing the two with non "WithOptions" - // functions is not supported. - TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error) -} - -// PutBlobOptions are used in PutBlobWithOptions. 
-type PutBlobOptions struct { - // Cache to look up blob infos. - Cache publicTypes.BlobInfoCache - // Denotes whether the blob is a config or not. - IsConfig bool - // Indicates an empty layer. - EmptyLayer bool - // The corresponding index in the layer slice. - LayerIndex *int -} - -// TryReusingBlobOptions are used in TryReusingBlobWithOptions. -type TryReusingBlobOptions struct { - // Cache to look up blob infos. - Cache publicTypes.BlobInfoCache - // Use an equivalent of the desired blob. - CanSubstitute bool - // Indicates an empty layer. - EmptyLayer bool - // The corresponding index in the layer slice. - LayerIndex *int - // The reference of the image that contains the target blob. - SrcRef reference.Named -} - -// ImageSourceChunk is a portion of a blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceChunk struct { - Offset uint64 - Length uint64 -} - -// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceSeekable interface { - // GetBlobAt returns a stream for the specified blob. - // The specified chunks must be not overlapping and sorted by their offset. - GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error) -} - -// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable. -// This API is experimental and can be changed without bumping the major version number. -type ImageDestinationPartial interface { - // PutBlobPartial writes contents of stream and returns data representing the result. - PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error) -} - -// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request. -type BadPartialRequestError struct { - Status string -} - -func (e BadPartialRequestError) Error() string { - return e.Status -} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 511cdcc37e5..20955ab7ff4 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -51,7 +51,7 @@ const ( // other than the ones the caller specifically allows. // expectedMIMEType is used only for diagnostics. // NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous // data that just isn’t what was expected, as opposed to actually ambiguous data. func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, allowed allowedManifestFields) error { @@ -71,7 +71,7 @@ func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, Manifests interface{} `json:"manifests"` }{} if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this shold not happen; + // The caller was supposed to already validate version numbers, so this should not happen; // let’s not bother with making this error “nice”. 
return err } diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 4b644f2539a..2e3e5da15ea 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -110,7 +110,8 @@ func GuessMIMEType(manifest []byte) string { } switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. + case DockerV2Schema2MediaType, DockerV2ListMediaType, + imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. return meta.MediaType } // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. @@ -121,9 +122,9 @@ func GuessMIMEType(manifest []byte) string { } return DockerV2Schema1MediaType case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. + // Best effort to understand if this is an OCI image since mediaType + // wasn't in the manifest for OCI image-spec < 1.0.2. + // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. ociMan := struct { Config struct { MediaType string `json:"mediaType"` diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index c4616b96500..5892184df19 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -66,6 +66,7 @@ func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descript return &OCI1{ imgspecv1.Manifest{ Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, Config: config, Layers: layers, }, diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index 5bec43ff998..c4f11e09c56 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -119,6 +119,7 @@ func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[ index := OCI1Index{ imgspecv1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, Manifests: make([]imgspecv1.Descriptor, len(components)), Annotations: dupStringStringMap(annotations), }, @@ -195,6 +196,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { index := OCI1Index{ Index: imgspecv1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, Manifests: []imgspecv1.Descriptor{}, Annotations: make(map[string]string), }, diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 54d325d34df..4fa9127659b 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -3,7 +3,6 @@ package archive import ( "context" "fmt" - "io/ioutil" "os" "strings" @@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image // If SystemContext.BigFilesTemporaryDir not "", overrides the 
temporary directory to use for storing big files func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") if err != nil { return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory") } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index d0ee7263527..77e8fd87637 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -112,7 +111,7 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *ociImageDestination) HasThreadSafePutBlob() bool { - return false + return true } // PutBlob writes contents of stream and returns data representing the result. @@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") + blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } @@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc if err := ensureParentDirectoryExists(blobPath); err != nil { return err } - if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { + if err := os.WriteFile(blobPath, m, 0644); err != nil { return err } @@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][] // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error { - if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { + if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) if err != nil { return err } - return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) + return os.WriteFile(d.ref.indexPath(), indexJSON, 0644) } func ensureDirectoryExists(path string) error { diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 9d8ab689ba4..8973f461c9a 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -3,7 +3,6 @@ package layout import ( "context" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest return nil, "", err } - m, err := ioutil.ReadFile(manifestPath) + m, err := os.ReadFile(manifestPath) if err != nil { return nil, "", err } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index 4ffbced6bd6..a6473ae68f4 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -5,7 +5,6 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" "net" "net/http" "net/url" @@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile // LoadFromFile takes a filename and deserializes the contents into Config object func loadFromFile(filename string) (*clientcmdConfig, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) + kubeconfigBytes, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index c7c6cf6945a..67612d800c2 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -95,7 +95,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re req.Header.Set("Content-Type", "application/json") } - logrus.Debugf("%s %s", method, url.String()) + logrus.Debugf("%s %s", method, url.Redacted()) res, err := c.httpClient.Do(req) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 3eb2a2cba22..011118fa52b 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -148,7 +147,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using 
the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") + tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") if err != nil { return types.BlobInfo{}, err } @@ -180,20 +179,24 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, } func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { - entries, err := ioutil.ReadDir(dir) + entries, err := os.ReadDir(dir) if err != nil { return err } - for _, info := range entries { - fullpath := filepath.Join(dir, info.Name()) - if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + for _, entry := range entries { + fullpath := filepath.Join(dir, entry.Name()) + if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { if err := os.Remove(fullpath); err != nil { return err } continue } + info, err := entry.Info() + if err != nil { + return err + } if selinuxHnd != nil { relPath, err := filepath.Rel(root, fullpath) if err != nil { @@ -223,7 +226,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user } } - if info.IsDir() { + if entry.IsDir() { if usermode { if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { return err @@ -233,7 +236,7 @@ func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, user if err != nil { return err } - } else if usermode && (info.Mode().IsRegular()) { + } else if usermode && (entry.Type().IsRegular()) { if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { return err } @@ -405,7 +408,7 @@ func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob [ } d.digest = digest - return ioutil.WriteFile(manifestPath, manifestBlob, 0644) + return os.WriteFile(manifestPath, manifestBlob, 0644) } // PutSignatures writes signatures to the destination. 
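The oci and ostree hunks above (together with the streamdigest, oci/archive, and openshift ones earlier) are part of one sweep replacing the deprecated io/ioutil package with its Go 1.16+ equivalents: os.CreateTemp, os.MkdirTemp, os.ReadFile, os.WriteFile, and os.ReadDir. Only the fixFiles change is more than mechanical: os.ReadDir returns lightweight fs.DirEntry values rather than eagerly stat()ed fs.FileInfo, so full mode bits now require an explicit entry.Info() call. A minimal sketch of that pattern, using a hypothetical directory walk that is not taken from the diff:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walkOnce mirrors the fixFiles migration above: cheap Type() checks first,
// and a lazy Info() call only when permission bits are actually needed.
func walkOnce(dir string) error {
	entries, err := os.ReadDir(dir) // []fs.DirEntry; no per-entry stat() yet
	if err != nil {
		return err
	}
	for _, entry := range entries {
		// Type() carries only the file-type bits (fs.ModeDir, fs.ModeSocket, ...).
		if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
			continue // special files, which fixFiles removes instead
		}
		info, err := entry.Info() // the stat() happens here, on demand
		if err != nil {
			return err
		}
		fmt.Printf("%s %v\n", filepath.Join(dir, entry.Name()), info.Mode())
	}
	return nil
}

func main() {
	if err := walkOnce(os.TempDir()); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```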
@@ -423,7 +426,7 @@ func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [
 	for i, sig := range signatures {
 		signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
-		if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil {
+		if err := os.WriteFile(signaturePath, sig, 0644); err != nil {
 			return err
 		}
 	}
diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
index d30c764a630..1e1f2be03cd 100644
--- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go
@@ -9,7 +9,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"strconv"
 	"strings"
 	"unsafe"
@@ -369,7 +368,7 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
 	}
 	defer sigReader.Close()
 
-	sig, err := ioutil.ReadAll(sigReader)
+	sig, err := io.ReadAll(sigReader)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
new file mode 100644
index 00000000000..8b22733ac3a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go
@@ -0,0 +1,538 @@
+package blobcache
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/image"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/transports"
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	_ types.ImageReference   = &BlobCache{}
+	_ types.ImageSource      = &blobCacheSource{}
+	_ types.ImageDestination = &blobCacheDestination{}
+)
+
+const (
+	compressedNote   = ".compressed"
+	decompressedNote = ".decompressed"
+)
+
+// BlobCache is an object which saves copies of blobs that are written to it while passing them
+// through to some real destination, and which can be queried directly in order to read them
+// back.
+//
+// Implements types.ImageReference.
+type BlobCache struct {
+	reference types.ImageReference
+	// WARNING: The contents of this directory may be accessed concurrently,
+	// both within this process and by multiple different processes
+	directory string
+	compress  types.LayerCompression
+}
+
+type blobCacheSource struct {
+	reference *BlobCache
+	source    types.ImageSource
+	sys       types.SystemContext
+	// this mutex synchronizes the counters below
+	mu          sync.Mutex
+	cacheHits   int64
+	cacheMisses int64
+	cacheErrors int64
+}
+
+type blobCacheDestination struct {
+	reference   *BlobCache
+	destination types.ImageDestination
+}
+
+func makeFilename(blobSum digest.Digest, isConfig bool) string {
+	if isConfig {
+		return blobSum.String() + ".config"
+	}
+	return blobSum.String()
+}
+
+// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
+// written to the destination image created from the resulting reference will also be stored
+// as-is to the specified directory or a temporary directory.
+// The compress argument controls whether or not the cache will try to substitute a compressed +// or different version of a blob when preparing the list of layers when reading an image. +func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (*BlobCache, error) { + if directory == "" { + return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) + } + switch compress { + case types.Compress, types.Decompress, types.PreserveOriginal: + // valid value, accept it + default: + return nil, errors.Errorf("unhandled LayerCompression value %v", compress) + } + return &BlobCache{ + reference: ref, + directory: directory, + compress: compress, + }, nil +} + +func (b *BlobCache) Transport() types.ImageTransport { + return b.reference.Transport() +} + +func (b *BlobCache) StringWithinTransport() string { + return b.reference.StringWithinTransport() +} + +func (b *BlobCache) DockerReference() reference.Named { + return b.reference.DockerReference() +} + +func (b *BlobCache) PolicyConfigurationIdentity() string { + return b.reference.PolicyConfigurationIdentity() +} + +func (b *BlobCache) PolicyConfigurationNamespaces() []string { + return b.reference.PolicyConfigurationNamespaces() +} + +func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return b.reference.DeleteImage(ctx, sys) +} + +func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { + if blobinfo.Digest == "" { + return false, -1, nil + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(b.directory, makeFilename(blobinfo.Digest, isConfig)) + fileInfo, err := os.Stat(filename) + if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { + return true, fileInfo.Size(), nil + } + if !os.IsNotExist(err) { + return false, -1, errors.Wrap(err, "checking size") + } + } + + return false, -1, nil +} + +func (b *BlobCache) Directory() string { + return b.directory +} + +func (b *BlobCache) ClearCache() error { + f, err := os.Open(b.directory) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + return errors.Wrapf(err, "error reading directory %q", b.directory) + } + for _, name := range names { + pathname := filepath.Join(b.directory, name) + if err = os.RemoveAll(pathname); err != nil { + return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(b)) + } + } + return nil +} + +func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := b.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference)) + } + return image.FromSource(ctx, sys, src) +} + +func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + src, err := b.reference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(b.reference)) + } + logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress) + return &blobCacheSource{reference: b, source: src, sys: *sys}, nil +} + +func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + dest, err := b.reference.NewImageDestination(ctx, sys) + 
if err != nil { + return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(b.reference)) + } + logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(b.reference), b.directory) + return &blobCacheDestination{reference: b, destination: dest}, nil +} + +func (s *blobCacheSource) Reference() types.ImageReference { + return s.reference +} + +func (s *blobCacheSource) Close() error { + logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) + return s.source.Close() +} + +func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) + manifestBytes, err := os.ReadFile(filename) + if err == nil { + s.cacheHits++ + return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil + } + if !os.IsNotExist(err) { + s.cacheErrors++ + return nil, "", errors.Wrap(err, "checking for manifest file") + } + } + s.cacheMisses++ + return s.source.GetManifest(ctx, instanceDigest) +} + +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return s.source.HasThreadSafeGetBlob() +} + +func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + present, size, err := s.reference.HasBlob(blobinfo) + if err != nil { + return nil, -1, err + } + if present { + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + return f, size, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, -1, errors.Wrap(err, "checking for cache") + } + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) + if err != nil { + return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) + } + return rc, size, nil +} + +func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.source.GetSignatures(ctx, instanceDigest) +} + +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) + } + canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) + + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + if infos == nil { + img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) + if err != nil { + return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + infos = img.LayerInfos() + } + + if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { + replacedInfos := make([]types.BlobInfo, 0, len(infos)) + for _, info := range infos { + var 
replaceDigest []byte + var err error + blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) + alternate := "" + switch s.reference.compress { + case types.Compress: + alternate = blobFile + compressedNote + replaceDigest, err = os.ReadFile(alternate) + case types.Decompress: + alternate = blobFile + decompressedNote + replaceDigest, err = os.ReadFile(alternate) + } + if err == nil && digest.Digest(replaceDigest).Validate() == nil { + alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) + fileInfo, err := os.Stat(alternate) + if err == nil { + switch info.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: + switch s.reference.compress { + case types.Compress: + info.MediaType = v1.MediaTypeImageLayerGzip + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + info.MediaType = v1.MediaTypeImageLayer + info.CompressionAlgorithm = nil + } + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType: + switch s.reference.compress { + case types.Compress: + info.MediaType = manifest.DockerV2Schema2LayerMediaType + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + // nope, not going to suggest anything, it's not allowed by the spec + replacedInfos = append(replacedInfos, info) + continue + } + } + logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) + info.CompressionOperation = s.reference.compress + info.Digest = digest.Digest(replaceDigest) + info.Size = fileInfo.Size() + logrus.Debugf("info = %#v", info) + } + } + replacedInfos = append(replacedInfos, info) + } + infos = replacedInfos + } + + return infos, nil +} + +func (d *blobCacheDestination) Reference() types.ImageReference { + return d.reference +} + +func (d *blobCacheDestination) Close() error { + logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) + return d.destination.Close() +} + +func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { + return d.destination.SupportedManifestMIMETypes() +} + +func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { + return d.destination.SupportsSignatures(ctx) +} + +func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { + return d.destination.DesiredLayerCompression() +} + +func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { + return d.destination.AcceptsForeignLayerURLs() +} + +func (d *blobCacheDestination) MustMatchRuntimeOS() bool { + return d.destination.MustMatchRuntimeOS() +} + +func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { + return d.destination.IgnoresEmbeddedDockerReference() +} + +// Decompress and save the contents of the decompressReader stream into the passed-in temporary +// file. If we successfully save all of the data, rename the file to match the digest of the data, +// and make notes about the relationship between the file that holds a copy of the compressed data +// and this new file. +func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { + defer wg.Done() + // Decompress from and digest the reading end of that pipe. 
+ decompressed, err3 := archive.DecompressStream(decompressReader) + digester := digest.Canonical.Digester() + if err3 == nil { + // Read the decompressed data through the filter over the pipe, blocking until the + // writing end is closed. + _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + } else { + // Drain the pipe to keep from stalling the PutBlob() thread. + if _, err := io.Copy(io.Discard, decompressReader); err != nil { + logrus.Debugf("error draining the pipe: %v", err) + } + } + decompressReader.Close() + decompressed.Close() + tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. + decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) + if err3 == nil { + // Rename the temporary file. + if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } else { + *alternateDigest = digester.Digest() + // Note the relationship between the two files. + if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) + } + if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) + } + } + } else { + // Remove the temporary file. 
+ if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } +} + +func (d *blobCacheDestination) HasThreadSafePutBlob() bool { + return d.destination.HasThreadSafePutBlob() +} + +func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + var tempfile *os.File + var err error + var n int + var alternateDigest digest.Digest + var closer io.Closer + wg := new(sync.WaitGroup) + needToWait := false + compression := archive.Uncompressed + if inputInfo.Digest != "" { + filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + tempfile, err = os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err == nil { + stream = io.TeeReader(stream, tempfile) + defer func() { + if err == nil { + if err = os.Rename(tempfile.Name(), filename); err != nil { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) + } + } else { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + } + tempfile.Close() + }() + } else { + logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) + } + if !isConfig { + initial := make([]byte, 8) + n, err = stream.Read(initial) + if n > 0 { + // Build a Reader that will still return the bytes that we just + // read, for PutBlob()'s sake. + stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) + if n >= len(initial) { + compression = archive.DetectCompression(initial[:n]) + } + if compression == archive.Gzip { + // The stream is compressed, so create a file which we'll + // use to store a decompressed copy. + decompressedTemp, err2 := os.CreateTemp(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err2 != nil { + logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) + decompressedTemp.Close() + } else { + // Write a copy of the compressed data to a pipe, + // closing the writing end of the pipe after + // PutBlob() returns. + decompressReader, decompressWriter := io.Pipe() + closer = decompressWriter + stream = io.TeeReader(stream, decompressWriter) + // Let saveStream() close the reading end and handle the temporary file. 
+ wg.Add(1) + needToWait = true + go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) + } + } + } + } + } + newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) + if closer != nil { + closer.Close() + } + if needToWait { + wg.Wait() + } + if err != nil { + return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) + } + if alternateDigest.Validate() == nil { + logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) + } else { + logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) + } + return newBlobInfo, nil +} + +func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) + if err != nil || present { + return present, reusedInfo, err + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + defer f.Close() + uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) + if err != nil { + return false, types.BlobInfo{}, err + } + return true, uploadedInfo, nil + } + } + + return false, types.BlobInfo{}, nil +} + +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) + } else { + filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) + if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { + logrus.Warnf("error saving manifest as %q: %v", filename, err) + } + } + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) +} + +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.destination.PutSignatures(ctx, signatures, instanceDigest) +} + +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index c28e8179296..34c90dd77e4 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -5,7 +5,6 @@ import ( "compress/bzip2" "fmt" "io" - "io/ioutil" "github.com/containers/image/v5/pkg/compression/internal" "github.com/containers/image/v5/pkg/compression/types" @@ -65,7 +64,7 @@ func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { - return ioutil.NopCloser(bzip2.NewReader(r)), nil + return io.NopCloser(bzip2.NewReader(r)), nil } // XzDecompressor is a DecompressorFunc for the xz compression algorithm. 
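The newly vendored pkg/blobcache above wraps an arbitrary types.ImageReference so that blobs written through the destination are also stored in a local directory, with the `.compressed`/`.decompressed` note files linking alternate forms of each layer for later substitution. A hedged caller-side sketch; the image name, cache directory, and panic-style error handling are illustrative, not taken from CRI-O:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobcache"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func main() {
	ref, err := alltransports.ParseImageName("docker://quay.io/example/image:latest")
	if err != nil {
		panic(err)
	}
	// types.PreserveOriginal stores blobs exactly as written; types.Compress or
	// types.Decompress additionally lets LayerInfosForCopy suggest the cached
	// alternate form of a layer on later reads.
	cached, err := blobcache.NewBlobCache(ref, "/var/tmp/blob-cache", types.PreserveOriginal)
	if err != nil {
		panic(err)
	}
	// The wrapper satisfies types.ImageReference, so it can stand in for the
	// original reference anywhere one is accepted.
	fmt.Println("caching blobs under", cached.Directory())
}
```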
@@ -74,7 +73,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) { if err != nil { return nil, err } - return ioutil.NopCloser(r), nil + return io.NopCloser(r), nil } // gzipCompressor is a CompressorFunc for the gzip compression algorithm. @@ -161,7 +160,7 @@ func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { return nil, false, errors.Wrapf(err, "initializing decompression") } } else { - res = ioutil.NopCloser(stream) + res = io.NopCloser(stream) } return res, decompressor != nil, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 63f5bd53e1b..d0bdd08e9a7 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -4,7 +4,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -15,6 +14,7 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" "github.com/hashicorp/go-multierror" @@ -54,8 +54,8 @@ var ( // SetCredentials stores the username and password in a location // appropriate for sys and the users’ configuration. -// A valid key can be either a registry hostname or additionally a namespace if -// the AuthenticationFileHelper is being unsed. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. // Returns a human-redable description of the location that was updated. // NOTE: The return value is only intended to be read by humans; its form is not an API, // it may change (or new forms can be added) any time. @@ -128,11 +128,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // possible sources, and then call `GetCredentials` on them. That // prevents us from having to reverse engineer the logic in // `GetCredentials`. - allRegistries := make(map[string]bool) - addRegistry := func(s string) { - allRegistries[s] = true + allKeys := make(map[string]bool) + addKey := func(s string) { + allKeys[s] = true } + // To use GetCredentials, we must at least convert the URL forms into host names. + // While we're at it, we’ll also canonicalize docker.io to the standard format. + normalizedDockerIORegistry := normalizeRegistry("docker.io") + helpers, err := sysregistriesv2.CredentialHelpers(sys) if err != nil { return nil, err @@ -151,10 +155,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // direct mapping to a registry, so we can just // walk the map. for registry := range auths.CredHelpers { - addRegistry(registry) + addKey(registry) } - for registry := range auths.AuthConfigs { - addRegistry(registry) + for key := range auths.AuthConfigs { + key := normalizeAuthFileKey(key, path.legacyFormat) + if key == normalizedDockerIORegistry { + key = "docker.io" + } + addKey(key) } } // External helpers. @@ -166,7 +174,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon switch errors.Cause(err) { case nil: for registry := range creds { - addRegistry(registry) + addKey(registry) } case exec.ErrNotFound: // It's okay if the helper doesn't exist. 
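The docker/config hunks in this region replace registry-only credential lookups with a generic key, which may be a full repository, a namespace within a registry, or a bare registry host; the file-based lookup walks the key from most to least specific (see authKeysForKey below), while external credential helpers are still queried with the bare registry host only. A small sketch of the resulting caller-visible API, assuming a made-up repository and the default (nil) SystemContext:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	// Lookup order for this key: quay.io/repo/ns/image, quay.io/repo/ns,
	// quay.io/repo, then quay.io.
	auth, err := config.GetCredentials(nil, "quay.io/repo/ns/image")
	if err != nil {
		panic(err)
	}
	if auth == (types.DockerAuthConfig{}) {
		fmt.Println("no credentials found") // empty struct, not an error
		return
	}
	fmt.Println("found credentials for user", auth.Username)
}
```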
@@ -179,17 +187,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. authConfigs := make(map[string]types.DockerAuthConfig) - for registry := range allRegistries { - authConf, err := GetCredentials(sys, registry) + for key := range allKeys { + authConf, err := GetCredentials(sys, key) if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - // Ignore if the credentials could not be found (anymore). - continue - } // Note: we rely on the logging in `GetCredentials`. return nil, err } - authConfigs[registry] = authConf + if authConf != (types.DockerAuthConfig{}) { + authConfigs[key] = authConf + } } return authConfigs, nil @@ -230,16 +236,14 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { return paths } -// GetCredentials returns the registry credentials stored in the -// registry-specific credential helpers or in the default global credentials -// helpers with falling back to using either auth.json -// file or .docker/config.json, including support for OAuth2 and IdentityToken. +// GetCredentials returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. // If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. // -// GetCredentialsForRef should almost always be used in favor of this API to -// allow different credentials for different repositories on the same registry. -func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get()) +// GetCredentialsForRef should almost always be used in favor of this API. +func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, key, homedir.Get()) } // GetCredentialsForRef returns the registry credentials necessary for @@ -247,35 +251,39 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // appropriate for sys and the users’ configuration. // If an entry is not found, an empty struct is returned. func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, ref, reference.Domain(ref), homedir.Get()) + return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get()) } // getCredentialsWithHomeDir is an internal implementation detail of // GetCredentialsForRef and GetCredentials. It exists only to allow testing it // with an artificial home directory. 
-func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, registry, homeDir string) (types.DockerAuthConfig, error) { - // consistency check of the ref and registry arguments - if ref != nil && reference.Domain(ref) != registry { - return types.DockerAuthConfig{}, errors.Errorf( - "internal error: provided reference domain %q name does not match registry %q", - reference.Domain(ref), registry, - ) +func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) { + _, err := validateKey(key) + if err != nil { + return types.DockerAuthConfig{}, err } if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry) + logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key) return *sys.DockerAuthConfig, nil } + var registry string // We compute this once because it is used in several places. + if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 { + registry = key[:firstSlash] + } else { + registry = key + } + // Anonymous function to query credentials from auth files. getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat) + authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat) if err != nil { return types.DockerAuthConfig{}, "", err } - if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { + if authConfig != (types.DockerAuthConfig{}) { return authConfig, path.path, nil } } @@ -291,57 +299,61 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re for _, helper := range helpers { var ( creds types.DockerAuthConfig + helperKey string credHelperPath string err error ) switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: + helperKey = key creds, credHelperPath, err = getCredentialsFromAuthFiles() // External helpers. default: + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers, but a "registry" is a valid parent of "key". 
+ helperKey = registry creds, err = getAuthFromCredHelper(helper, registry) } if err != nil { - logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err) + logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) multiErr = multierror.Append(multiErr, err) continue } - if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 { - continue - } - msg := fmt.Sprintf("Found credentials for %s in credential helper %s", registry, helper) - if credHelperPath != "" { - msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + if creds != (types.DockerAuthConfig{}) { + msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper) + if credHelperPath != "" { + msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + } + logrus.Debug(msg) + return creds, nil } - logrus.Debug(msg) - return creds, nil } if multiErr != nil { return types.DockerAuthConfig{}, multiErr } - logrus.Debugf("No credentials for %s found", registry) + logrus.Debugf("No credentials for %s found", key) return types.DockerAuthConfig{}, nil } -// GetAuthentication returns the registry credentials stored in the -// registry-specific credential helpers or in the default global credentials -// helpers with falling back to using either auth.json file or -// .docker/config.json +// GetAuthentication returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. +// If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. // // Deprecated: This API only has support for username and password. To get the // support for oauth2 in container registry authentication, we added the new -// GetCredentials API. The new API should be used and this API is kept to +// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to // maintain backward compatibility. -func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) { + return getAuthenticationWithHomeDir(sys, key, homedir.Get()) } // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, // it exists only to allow testing it with an artificial home directory. -func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) { - auth, err := getCredentialsWithHomeDir(sys, nil, registry, homeDir) +func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { + auth, err := getCredentialsWithHomeDir(sys, key, homeDir) if err != nil { return "", "", err } @@ -353,8 +365,8 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st // RemoveAuthentication removes credentials for `key` from all possible // sources such as credential helpers and auth files. -// A valid key can be either a registry hostname or additionally a namespace if -// the AuthenticationFileHelper is being unsed. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. 
func RemoveAuthentication(sys *types.SystemContext, key string) error { isNamespaced, err := validateKey(key) if err != nil { @@ -531,7 +543,7 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, e func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { var auths dockerConfigFile - raw, err := ioutil.ReadFile(path) + raw, err := os.ReadFile(path) if err != nil { if os.IsNotExist(err) { auths.AuthConfigs = map[string]dockerAuthConfig{} @@ -593,7 +605,7 @@ func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) ( return "", errors.Wrapf(err, "marshaling JSON %q", path) } - if err = ioutil.WriteFile(path, newData, 0600); err != nil { + if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { return "", errors.Wrapf(err, "writing to file %q", path) } } @@ -606,6 +618,10 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) if err != nil { + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper) + err = nil + } return types.DockerAuthConfig{}, err } @@ -639,26 +655,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { return helperclient.Erase(p, registry) } -// findAuthentication looks for auth of registry in path. If ref is -// not nil, then it will be taken into account when looking up the -// authentication credentials. -func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { +// findCredentialsInFile looks for credentials matching "key" +// (which is "registry" or a namespace in "registry") in "path". +func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { auths, err := readJSONFile(path, legacyFormat) if err != nil { return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path) } // First try cred helpers. They should always be normalized. + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers. if ch, exists := auths.CredHelpers[registry]; exists { + logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path) return getAuthFromCredHelper(ch, registry) } - // Support for different paths in auth. + // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) var keys []string - if !legacyFormat && ref != nil { - keys = authKeysForRef(ref) + if !legacyFormat { + keys = authKeysForKey(key) } else { keys = []string{registry} } @@ -686,26 +704,28 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat } } + // Only log this if we found nothing; getCredentialsWithHomeDir logs the + // source of found data. + logrus.Debugf("No credentials matching %s found in %s", key, path) return types.DockerAuthConfig{}, nil } -// authKeysForRef returns the valid paths for a provided reference. For example, -// when given a reference "quay.io/repo/ns/image:tag", then it would return +// authKeysForKey returns the keys matching a provided auth file key, in order +// from the best match to worst. 
For example, +// when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForRef(ref reference.Named) (res []string) { - name := ref.Name() - +func authKeysForKey(key string) (res []string) { for { - res = append(res, name) + res = append(res, key) - lastSlash := strings.LastIndex(name, "/") + lastSlash := strings.LastIndex(key, "/") if lastSlash == -1 { break } - name = name[:lastSlash] + key = key[:lastSlash] } return res @@ -759,11 +779,24 @@ func normalizeRegistry(registry string) string { // validateKey verifies that the input key does not have a prefix that is not // allowed and returns an indicator if the key is namespaced. -func validateKey(key string) (isNamespaced bool, err error) { +func validateKey(key string) (bool, error) { if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") { - return isNamespaced, errors.Errorf("key %s contains http[s]:// prefix", key) + return false, errors.Errorf("key %s contains http[s]:// prefix", key) } + // Ideally this should only accept explicitly valid keys, compare + // validateIdentityRemappingPrefix. For now, just reject values that look + // like tagged or digested values. + if strings.ContainsRune(key, '@') { + return false, fmt.Errorf(`key %s contains a '@' character`, key) + } + + firstSlash := strings.IndexRune(key, '/') + isNamespaced := firstSlash != -1 + // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo. + if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') { + return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key) + } // check if the provided key contains one or more subpaths. - return strings.ContainsRune(key, '/'), nil + return isNamespaced, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go deleted file mode 100644 index 0bf16125919..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go +++ /dev/null @@ -1,119 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/containers/image/v5/internal/pkg/keyctl" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// NOTE: none of the functions here are currently used. If we ever want to -// re-enable keyring support, we should introduce a similar built-in credential -// helpers as for `sysregistriesv2.AuthenticationFileHelper`. 
- -const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return "", "", err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return "", "", err - } - authData, err := key.Get() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(authData), "\x00", 2) - if len(parts) != 2 { - return "", "", nil - } - return parts[0], parts[1], nil -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - - if err != nil { - return err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return err - } - return key.Unlink() -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - keys, err := keyctl.ReadUserKeyring() - if err != nil { - return err - } - - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return err - } - - for _, k := range keys { - keyAttr, err := k.Describe() - if err != nil { - return err - } - // split string "type;uid;gid;perm;description" - keyAttrs := strings.SplitN(keyAttr, ";", 5) - if len(keyAttrs) < 5 { - return errors.Errorf("Key attributes of %d are not available", k.ID()) - } - keyDescribe := keyAttrs[4] - if strings.HasPrefix(keyDescribe, keyDescribePrefix) { - err := keyctl.Unlink(userkeyring, k) - if err != nil { - return errors.Wrapf(err, "unlinking key %d", k.ID()) - } - logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr) - } - } - return nil -} - -func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - keyring, err := keyctl.SessionKeyring() - if err != nil { - return err - } - id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) - if err != nil { - return err - } - - // sets all permission(view,read,write,search,link,set attribute) for current user - // it enables the user to search the key after it linked to user keyring and unlinked from session keyring - err = keyctl.SetPerm(id, keyctl.PermUserAll) - if err != nil { - return err - } - // link the key to userKeyring - userKeyring, err := keyctl.UserKeyring() - if err != nil { - return errors.Wrapf(err, "getting user keyring") - } - err = keyctl.Link(userKeyring, id) - if err != nil { - return errors.Wrapf(err, "linking the key to user keyring") - } - // unlink the key from session keyring - err = keyctl.Unlink(keyring, id) - if err != nil { - return errors.Wrapf(err, "unlinking the key from session keyring") - } - return nil -} - -func genDescription(registry string) string { //nolint:deadcode,unused - return fmt.Sprintf("%s%s", keyDescribePrefix, registry) -} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go deleted file mode 100644 index d9827d8edbc..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !linux && (!386 || !amd64) -// +build !linux -// +build !386 !amd64 - -package config - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - return "", "", ErrNotSupported -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func 
setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - return ErrNotSupported -} diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index fb0a15b993e..46c10ff631a 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -118,6 +118,7 @@ type Resolved struct { } func (r *Resolved) addCandidate(named reference.Named) { + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r}) } @@ -138,6 +139,8 @@ const ( rationaleUSR // Resolved value has been selected by the user (via the prompt). rationaleUserSelection + // Resolved value has been enforced to use Docker Hub (via SystemContext). + rationaleEnforcedDockerHub ) // Description returns a human-readable description about the resolution @@ -152,6 +155,8 @@ func (r *Resolved) Description() string { return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription) case rationaleUSR: return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription) + case rationaleEnforcedDockerHub: + return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription) case rationaleUserSelection, rationaleNone: fallthrough default: @@ -265,8 +270,20 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } if !isShort { // no short name - named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed + resolved.addCandidate(shortRef) + return resolved, nil + } + + // Resolve to docker.io only if enforced by the caller (e.g., Podman's + // Docker-compatible REST API). + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "cannot normalize input: %q", name) + } resolved.addCandidate(named) + resolved.rationale = rationaleEnforcedDockerHub + resolved.originDescription = "enforced by caller" return resolved, nil } @@ -295,9 +312,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - resolved.addCandidate(namedAlias) resolved.rationale = rationaleAlias resolved.originDescription = aliasOriginDescription @@ -325,9 +339,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { if err != nil { return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - resolved.addCandidate(named) } @@ -412,6 +423,23 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e var candidates []reference.Named + // Complete the candidates with the specified registries. 
+ completeCandidates := func(registries []string) ([]reference.Named, error) { + for _, reg := range registries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + candidates = append(candidates, named) + } + return candidates, nil + } + + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + return completeCandidates([]string{"docker.io"}) + } + // Strip off the tag to normalize the short name for looking it up in // the config files. isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef) @@ -434,9 +462,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - + namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed candidates = append(candidates, namedAlias) } @@ -447,16 +473,5 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e } // Note that "localhost" has precedence over the unqualified-search registries. - for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) { - named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) - if err != nil { - return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) - } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - - candidates = append(candidates, named) - } - - return candidates, nil + return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...)) } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 7122e869fe6..6909ea0a60f 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -13,6 +13,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -315,11 +316,14 @@ func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAlia func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { conf := shortNameAliasConf{} - _, err := toml.DecodeFile(confPath, &conf) + meta, err := toml.DecodeFile(confPath, &conf) if err != nil && !os.IsNotExist(err) { // It's okay if the config doesn't exist. Other errors are not. return nil, nil, errors.Wrapf(err, "loading short-name aliases config file %q", confPath) } + if keys := meta.Undecoded(); len(keys) > 0 { + logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) + } // Even if we don’t always need the cache, doing so validates the machine-generated config. The // file could still be corrupted by another process or user. 
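With the shortnames changes above, ":latest" completion now happens once inside addCandidate, and a caller can force short names to resolve to Docker Hub regardless of registries.conf. A minimal sketch of driving this through the public API; the image name is illustrative and error handling is reduced to panics:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/shortnames"
	"github.com/containers/image/v5/types"
)

func main() {
	// Force short-name resolution to docker.io, bypassing registries.conf,
	// the way Podman's Docker-compatible REST API does.
	sys := &types.SystemContext{
		PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub: true,
	}
	resolved, err := shortnames.Resolve(sys, "alpine")
	if err != nil {
		panic(err)
	}
	// Likely prints: Resolving "alpine" to docker.io (enforced by caller)
	fmt.Println(resolved.Description())
	for _, candidate := range resolved.PullCandidates {
		// addCandidate has already applied reference.TagNameOnly, so this
		// prints docker.io/library/alpine:latest rather than a tag-less name.
		fmt.Println(candidate.Value.String())
	}
}

Because the enforced path returns before any configuration files are consulted, this sketch needs no registries.conf on the host.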
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
index c8a603c4ef0..c1753c8457a 100644
--- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
@@ -2,6 +2,7 @@ package sysregistriesv2
 
 import (
 	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -43,6 +44,16 @@ const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
 // helper.
 const AuthenticationFileHelper = "containers-auth.json"
 
+const (
+	// configuration values for "pull-from-mirror"
+	// mirrors will be used for both digest pulls and tag pulls
+	MirrorAll = "all"
+	// mirrors will only be used for digest pulls
+	MirrorByDigestOnly = "digest-only"
+	// mirrors will only be used for tag pulls
+	MirrorByTagOnly = "tag-only"
+)
+
 // Endpoint describes a remote location of a registry.
 type Endpoint struct {
 	// The endpoint's remote location. Can be empty iff Prefix contains
@@ -53,6 +64,18 @@ type Endpoint struct {
 	// If true, certs verification will be skipped and HTTP (non-TLS)
 	// connections will be allowed.
 	Insecure bool `toml:"insecure,omitempty"`
+	// PullFromMirror restricts the kinds of image pulls for which this mirror will be used.
+	// Set to "all", "digest-only", or "tag-only".
+	// If "digest-only", mirrors will only be used for digest pulls. Pulling images by
+	// tag can potentially yield different images, depending on which endpoint
+	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
+	// If "tag-only", mirrors will only be used for tag pulls. A mirror that is kept up to date
+	// at extra expense, and is therefore unlikely to be out of sync when tags move,
+	// need not also be used for digest references.
+	// The default is "all" (or left empty): mirrors will be used for both digest pulls and tag pulls unless mirror-by-digest-only is set for the primary registry.
+	// This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
+	// This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
+	PullFromMirror string `toml:"pull-from-mirror,omitempty"`
 }
 
 // userRegistriesFile is the path to the per user registry configuration file.
@@ -115,7 +138,7 @@ type Registry struct {
 	Blocked bool `toml:"blocked,omitempty"`
 	// If true, mirrors will only be used for digest pulls. Pulling images by
 	// tag can potentially yield different images, depending on which endpoint
-	// we pull from. Forcing digest-pulls for mirrors avoids that issue.
+	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
 	MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"`
 }
 
@@ -130,17 +153,29 @@ type PullSource struct {
 // reference.
 func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) {
 	var endpoints []Endpoint
-
+	_, isDigested := ref.(reference.Canonical)
 	if r.MirrorByDigestOnly {
-		// Only use mirrors when the reference is a digest one.
-		if _, isDigested := ref.(reference.Canonical); isDigested {
-			endpoints = append(r.Mirrors, r.Endpoint)
-		} else {
-			endpoints = []Endpoint{r.Endpoint}
+		// Only use mirrors when the reference is a digested one.
+		if isDigested {
+			endpoints = append(endpoints, r.Mirrors...)
 		}
 	} else {
-		endpoints = append(r.Mirrors, r.Endpoint)
+		for _, mirror := range r.Mirrors {
+			// skip the mirror if a per-mirror setting exists but the reference does not match the restriction
+			switch mirror.PullFromMirror {
+			case MirrorByDigestOnly:
+				if !isDigested {
+					continue
+				}
+			case MirrorByTagOnly:
+				if isDigested {
+					continue
+				}
+			}
+			endpoints = append(endpoints, mirror)
+		}
 	}
+	endpoints = append(endpoints, r.Endpoint)
 
 	sources := []PullSource{}
 	for _, ep := range endpoints {
@@ -374,6 +409,10 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
 			}
 		}
 
+		// validate that the mirror-usage setting is not applied to a primary registry
+		if reg.PullFromMirror != "" {
+			return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix)
+		}
 		// make sure mirrors are valid
 		for _, mir := range reg.Mirrors {
 			mir.Location, err = parseLocation(mir.Location)
@@ -387,6 +426,14 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
 			if mir.Location == "" {
 				return &InvalidRegistries{s: "invalid condition: mirror location is unset"}
 			}
+
+			if reg.MirrorByDigestOnly && mir.PullFromMirror != "" {
+				return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)}
+			}
+			if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll &&
+				mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly {
+				return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)}
+			}
 		}
 		if reg.Location == "" {
 			regMap[reg.Prefix] = append(regMap[reg.Prefix], reg)
@@ -597,17 +644,17 @@ func dropInConfigs(wrapper configWrapper) ([]string, error) {
 		dirPaths = append(dirPaths, wrapper.userConfigDirPath)
 	}
 	for _, dirPath := range dirPaths {
-		err := filepath.Walk(dirPath,
+		err := filepath.WalkDir(dirPath,
 			// WalkFunc to read additional configs
-			func(path string, info os.FileInfo, err error) error {
+			func(path string, d fs.DirEntry, err error) error {
 				switch {
 				case err != nil:
 					// return error (could be a permission problem)
 					return err
-				case info == nil:
+				case d == nil:
 					// this should only happen when err != nil but let's be sure
 					return nil
-				case info.IsDir():
+				case d.IsDir():
 					if path != dirPath {
 						// make sure to not recurse into sub-directories
 						return filepath.SkipDir
@@ -877,10 +924,13 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
 
 	// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
 	var combinedTOML tomlConfig
-	_, err := toml.DecodeFile(path, &combinedTOML)
+	meta, err := toml.DecodeFile(path, &combinedTOML)
 	if err != nil {
 		return nil, err
 	}
+	if keys := meta.Undecoded(); len(keys) > 0 {
+		logrus.Debugf("Failed to decode keys %q from %q", keys, path)
+	}
 
 	if combinedTOML.V1RegistriesConf.Nonempty() {
 		// Enforce the v2 format if requested.
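The per-mirror restrictions can be exercised directly through PullSourcesFromReference. A minimal sketch, constructing a Registry in code rather than loading it from registries.conf; all registry locations are illustrative and the digest is a dummy value:

package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
)

func main() {
	reg := &sysregistriesv2.Registry{
		Prefix:   "registry.example.com",
		Endpoint: sysregistriesv2.Endpoint{Location: "registry.example.com"},
		Mirrors: []sysregistriesv2.Endpoint{
			// Only consulted for digest pulls.
			{Location: "digest-mirror.example.com", PullFromMirror: sysregistriesv2.MirrorByDigestOnly},
			// Only consulted for tag pulls.
			{Location: "tag-mirror.example.com", PullFromMirror: sysregistriesv2.MirrorByTagOnly},
		},
	}

	// A digested reference selects the digest-only mirror plus the primary endpoint;
	// a tag reference would select the tag-only mirror instead.
	ref, err := reference.ParseNormalizedNamed("registry.example.com/app@sha256:" + strings.Repeat("0", 64))
	if err != nil {
		panic(err)
	}
	sources, err := reg.PullSourcesFromReference(ref)
	if err != nil {
		panic(err)
	}
	for _, src := range sources {
		// Expected: digest-mirror.example.com, then registry.example.com.
		fmt.Println(src.Endpoint.Location)
	}
}

In a real deployment these values come from registries.conf; the validation in postProcessRegistries above rejects pull-from-mirror on a primary registry and rejects combining it with mirror-by-digest-only.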
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 7e2142b1f58..c766417d0ec 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -2,7 +2,6 @@ package tlsclientconfig import ( "crypto/tls" - "io/ioutil" "net" "net/http" "os" @@ -19,7 +18,7 @@ import ( // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) + fs, err := os.ReadDir(dir) if err != nil { if os.IsNotExist(err) { return nil @@ -35,7 +34,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { fullPath := filepath.Join(dir, f.Name()) if strings.HasSuffix(f.Name(), ".crt") { logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) + data, err := os.ReadFile(fullPath) if err != nil { if os.IsNotExist(err) { // Dangling symbolic link? @@ -81,7 +80,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { return nil } -func hasFile(files []os.FileInfo, name string) bool { +func hasFile(files []os.DirEntry, name string) bool { for _, f := range files { if f.Name() == name { return true diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go new file mode 100644 index 00000000000..70758ad4399 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -0,0 +1,210 @@ +package sif + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +// injectedScriptTargetPath is the path injectedScript should be written to in the created image. +const injectedScriptTargetPath = "/podman/runscript" + +// parseDefFile parses a SIF definition file from reader, +// and returns non-trivial contents of the %environment and %runscript sections. +func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. 
+func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := os.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. + conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). 
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using os.{CreateTemp,MkdirTemp}, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. + logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath) + if err := func() error { // A scope for defer + f, err := os.Create(squashFSPath) + if err != nil { + return err + } + defer f.Close() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil { + return err + } + return nil + }(); err != nil { + return "", nil, err + } + logrus.Debugf("... 
finished creating a temporary squashfs image")
+
+	if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+		return "", nil, err
+	}
+	succeeded = true
+	return tarPath, []string{command}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 00000000000..ccf1259660e
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,216 @@
+package sif
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/containers/image/v5/internal/tmpdir"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecs "github.com/opencontainers/image-spec/specs-go"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+	"github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+	ref          sifReference
+	workDir      string
+	layerDigest  digest.Digest
+	layerSize    int64
+	layerFile    string
+	config       []byte
+	configDigest digest.Digest
+	manifest     []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+	}
+	defer f.Close()
+
+	// TODO: Instead of writing the tar file to disk, and reading
+	// it here again, stream the tar file to a pipe and
+	// compute the digest while writing it to disk.
+	logrus.Debugf("Computing a digest of the SIF conversion output...")
+	digester := digest.Canonical.Digester()
+	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+	size, err := io.Copy(digester.Hash(), f)
+	if err != nil {
+		return "", -1, fmt.Errorf("reading %q: %w", path, err)
+	}
+	digest := digester.Digest()
+	logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+	return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from an existing SIF image file.
+// newImageSource extracts SIF objects and saves them in a temp directory.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Architecture: sifImg.PrimaryArch(), + OS: "linux", + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + return &sifImageSource{ + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + }, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *sifImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by the sif transport") + } + return nil, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 00000000000..18d894bc35c --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,164 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. 
+var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. +type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+	return nil, errors.New(`"sif:" locations can only be read from, not written to`)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+	return errors.New("Deleting images not implemented for sif: images")
+}
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index 07fdd42a9af..8e9ce0dd236 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -3,22 +3,46 @@ package signature
 import (
+	"errors"
 	"fmt"
+	"strings"
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/manifest"
 	"github.com/opencontainers/go-digest"
 )
 
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+	// Passphrase to use when signing with the key identity.
+	Passphrase string
+}
+
-// SignDockerManifest returns a signature for manifest as the specified dockerReference,
-// using mech and keyIdentity.
-func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+// SignDockerManifestWithOptions returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
 	manifestDigest, err := manifest.Digest(m)
 	if err != nil {
 		return nil, err
 	}
 	sig := newUntrustedSignature(manifestDigest, dockerReference)
-	return sig.sign(mech, keyIdentity)
+
+	var passphrase string
+	if options != nil {
+		passphrase = options.Passphrase
+		// The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
+		if strings.Contains(passphrase, "\n") {
+			return nil, errors.New("invalid passphrase: must not contain a line break")
+		}
+	}
+
+	return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
 }
 
 // VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index 2c08c231ea8..249b5a1fe46 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -6,16 +6,19 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"strings"
 
-	"golang.org/x/crypto/openpgp"
+	// This code is used only to parse the data in an explicitly-untrusted
+	// code path, where cryptography is not relevant. For now, continue to
+	// use this frozen deprecated implementation. When mechanism_openpgp.go
+	// migrates to another implementation, this should migrate as well.
+	//lint:ignore SA1019 See above
+	"golang.org/x/crypto/openpgp" //nolint:staticcheck
 )
 
 // SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close().
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
-// eliminate ambiguities, support CA signatures and perhaps other key properties)
 type SigningMechanism interface {
 	// Close removes resources associated with the mechanism, if any.
 	Close() error
@@ -34,6 +37,15 @@ type SigningMechanism interface {
 	UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
 }
 
+// signingMechanismWithPassphrase is an internal extension of SigningMechanism.
+type signingMechanismWithPassphrase interface {
+	SigningMechanism
+
+	// SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase.
+	// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+	SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error)
+}
+
 // SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
 type SigningNotSupportedError string
 
@@ -70,7 +82,7 @@ func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents
 	if !md.IsSigned {
 		return nil, "", errors.New("The input is not a signature")
 	}
-	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	content, err := io.ReadAll(md.UnverifiedBody)
 	if err != nil {
 		// Coverage: An error during reading the body can happen only if
 		// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index 6ae74d430d6..4c7968417ed 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -5,11 +5,11 @@ package signature
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 
-	"github.com/mtrmac/gpgme"
+	"github.com/proglottis/gpgme"
 )
 
 // A GPG/OpenPGP signing mechanism, implemented using gpgme.
@@ -20,7 +20,7 @@ type gpgmeSigningMechanism struct {
 
 // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
 // The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
 	ctx, err := newGPGMEContext(optionalDir)
 	if err != nil {
 		return nil, err
@@ -35,8 +35,8 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
 // recognizes _only_ public keys from the supplied blob, and returns the identities
 // of these keys.
 // The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
-	dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
+	dir, err := os.MkdirTemp("", "containers-ephemeral-gpg-")
 	if err != nil {
 		return nil, nil, err
 	}
@@ -117,9 +117,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error {
 	return nil
 }
 
-// Sign creates a (non-detached) signature of input using keyIdentity.
+// SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase.
 // Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
 	key, err := m.ctx.GetKey(keyIdentity, true)
 	if err != nil {
 		return nil, err
@@ -133,12 +133,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte,
 	if err != nil {
 		return nil, err
 	}
+
+	if passphrase != "" {
+		// Callback to write the passphrase to the specified file descriptor.
+		callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
+			if prevWasBad {
+				return errors.New("bad passphrase")
+			}
+			_, err := gpgmeFD.WriteString(passphrase + "\n")
+			return err
+		}
+		if err := m.ctx.SetCallback(callback); err != nil {
+			return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err)
+		}
+
+		// Loopback mode will use the callback instead of prompting the user.
+		if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
+			return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err)
+		}
+	}
+
 	if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
 		return nil, err
 	}
 	return sigBuffer.Bytes(), nil
 }
 
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+	return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
 // Verify parses unverifiedSignature and returns the content and the signer's identity
 func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
 	signedBuffer := bytes.Buffer{}
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index 0a09788f989..63cb7788bbb 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -7,14 +7,21 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"path"
 	"strings"
 	"time"
 
 	"github.com/containers/storage/pkg/homedir"
-	"golang.org/x/crypto/openpgp"
+	// This is fallback code; the primary recommendation is to use the gpgme mechanism
+	// implementation, which is out-of-process and more appropriate for handling long-term private key material
+	// than any Go implementation.
+	// For this verify-only fallback, we haven't reviewed the
+	// existing alternatives to choose one; so, for now, we continue to
+	// use this frozen deprecated implementation.
+	//lint:ignore SA1019 See above
+	"golang.org/x/crypto/openpgp" //nolint:staticcheck
 )
 
 // A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
@@ -24,7 +31,7 @@ type openpgpSigningMechanism struct {
 
 // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
 // The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
 	m := &openpgpSigningMechanism{
 		keyring: openpgp.EntityList{},
 	}
@@ -37,7 +44,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
 		}
 	}
 
-	pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+	pubring, err := os.ReadFile(path.Join(gpgHome, "pubring.gpg"))
 	if err != nil {
 		if !os.IsNotExist(err) {
 			return nil, err
@@ -55,7 +62,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
 // recognizes _only_ public keys from the supplied blob, and returns the identities
 // of these keys.
 // The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
 	m := &openpgpSigningMechanism{
 		keyring: openpgp.EntityList{},
 	}
@@ -104,10 +111,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error {
 
-// Sign creates a (non-detached) signature of input using keyIdentity.
+// SignWithPassphrase creates a (non-detached) signature of input using keyIdentity and passphrase.
 // Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
 	return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
 }
 
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+	return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
 // Verify parses unverifiedSignature and returns the content and the signer's identity
 func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
 	md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
@@ -117,7 +130,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
 	if !md.IsSigned {
 		return nil, "", errors.New("not signed")
 	}
-	content, err := ioutil.ReadAll(md.UnverifiedBody)
+	content, err := io.ReadAll(md.UnverifiedBody)
 	if err != nil {
 		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
 		// (and possibly also signed, but it _must_ be encrypted) and the signing
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go
index 82fbb68cb14..bb91cae8c16 100644
--- a/vendor/github.com/containers/image/v5/signature/policy_config.go
+++ b/vendor/github.com/containers/image/v5/signature/policy_config.go
@@ -16,7 +16,6 @@ package signature
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -80,7 +79,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri
 // NewPolicyFromFile returns a policy configured in the specified file.
func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) + contents, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 26cca4759e0..65e8259732b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -5,7 +5,7 @@ package signature import ( "context" "fmt" - "io/ioutil" + "os" "strings" "github.com/containers/image/v5/manifest" @@ -33,7 +33,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types if pr.KeyData != nil { data = pr.KeyData } else { - d, err := ioutil.ReadFile(pr.KeyPath) + d, err := os.ReadFile(pr.KeyPath) if err != nil { return sarRejected, nil, err } diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go index 09f4f85e0a8..05bf8229e7d 100644 --- a/vendor/github.com/containers/image/v5/signature/signature.go +++ b/vendor/github.com/containers/image/v5/signature/signature.go @@ -190,12 +190,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { // of the system just because it is a private key — actually the presence of a private key // on the system increases the likelihood of an a successful attack on that private key // on that particular system.) -func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) { +func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) { json, err := json.Marshal(s) if err != nil { return nil, err } + if newMech, ok := mech.(signingMechanismWithPassphrase); ok { + return newMech.SignWithPassphrase(json, keyIdentity, passphrase) + } + + if passphrase != "" { + return nil, errors.New("signing mechanism does not support passphrases") + } + return mech.Sign(json, keyIdentity) } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 7329ef6eee0..8071e3b32fa 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -10,7 +10,6 @@ import ( stderrors "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -18,9 +17,9 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" @@ -155,7 +154,7 @@ func (s *storageImageSource) HasThreadSafeGetBlob() bool { // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { if info.Digest == image.GzippedEmptyLayerDigest { - return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil + return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } // NOTE: the blob is first written to a temporary file and subsequently @@ -167,7 +166,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c } defer rc.Close() - tmpFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") + tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "") if err != nil { return nil, 0, err } @@ -210,7 +209,7 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC } r := bytes.NewReader(b) logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil + return io.NopCloser(r), int64(r.Len()), "", nil } // Step through the list of matching layers. Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could @@ -395,7 +394,7 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest * // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until // it's time to Commit() the image func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") if err != nil { return nil, errors.Wrapf(err, "creating a temporary directory") } @@ -446,15 +445,20 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) } -// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is -// set, the blob will be committed directly. Either by the calling goroutine -// or by another goroutine already committing layers. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with non "WithOptions" functions is not -// supported. -func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) { - info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig) +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. 
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+	info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
 	if err != nil {
 		return info, err
 	}
@@ -466,11 +470,6 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
 	return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
 }
 
-// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-func (s *storageImageDestination) HasThreadSafePutBlob() bool {
-	return true
-}
-
 // PutBlob writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -480,6 +479,15 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
+	return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{
+		Cache:    cache,
+		IsConfig: isConfig,
+	})
+}
+
+// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
+// The caller must arrange the blob to be eventually committed using s.commitLayer().
+func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
 	// Stores a layer or data blob in our temporary directory, checking that any information
 	// in the blobinfo matches the incoming data.
 	errorBlobInfo := types.BlobInfo{
@@ -534,7 +542,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	s.lock.Unlock()
 	// This is safe because we have just computed diffID, and blobDigest was either computed
 	// by us, or validated by the caller (usually copy.digestingReader).
-	cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
+	options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
 	return types.BlobInfo{
 		Digest:    blobDigest,
 		Size:      blobSize,
@@ -542,80 +550,40 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	}, nil
 }
 
-// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If
-// options.LayerIndex is set, the reused blob will be recoreded as already
-// pulled.
-//
-// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be
-// used the together. Mixing the two with the non "WithOptions" functions
-// is not supported.
-func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef) - if err != nil || !reused || options.LayerIndex == nil { - return reused, info, err - } - - return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) -} - -// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob. -// If ref is provided, this function first tries to get layer from Additional Layer Store. -func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) { - // lock the entire method as it executes fairly quickly - s.lock.Lock() - defer s.lock.Unlock() - - if ref != nil { - // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String()) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) - } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[blobinfo.Digest] = aLayer - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: aLayer.CompressedSize(), - MediaType: blobinfo.MediaType, - }, nil - } - } - - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} - type zstdFetcher struct { - stream internalTypes.ImageSourceSeekable - ctx context.Context - blobInfo types.BlobInfo + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo } -// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt. +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []internalTypes.ImageSourceChunk + var newChunks []private.ImageSourceChunk for _, v := range chunks { - i := internalTypes.ImageSourceChunk{ + i := private.ImageSourceChunk{ Offset: v.Offset, Length: v.Length, } newChunks = append(newChunks, i) } - rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks) - if _, ok := err.(internalTypes.BadPartialRequestError); ok { + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { err = chunked.ErrBadRequest{} } return rc, errs, err } -// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed -// in a non-sequential way to retrieve the missing chunks. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). 
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { fetcher := zstdFetcher{ - stream: stream, - ctx: ctx, - blobInfo: srcInfo, + chunkAccessor: chunkAccessor, + ctx: ctx, + blobInfo: srcInfo, } differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) @@ -640,6 +608,22 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int return srcInfo, nil } +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options) + if err != nil || !reused || options.LayerIndex == nil { + return reused, info, err + } + + return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) +} + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -650,16 +634,36 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{ + Cache: cache, + CanSubstitute: canSubstitute, + }) +} + +// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. +// The caller must arrange the blob to be eventually committed using s.commitLayer(). +func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store.
+ aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[blobinfo.Digest] = aLayer + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: aLayer.CompressedSize(), + MediaType: blobinfo.MediaType, + }, nil + } + } -// tryReusingBlobLocked implements a core functionality of TryReusingBlob. -// This must be called with a lock being held on storageImageDestination. -func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if blobinfo.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } @@ -708,9 +712,9 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob // Does the blob correspond to a known DiffID which we already have available? // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the - // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. - if canSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. + if options.CanSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) @@ -720,8 +724,8 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, blobinfo, nil } - if !canSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + if !options.CanSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) } s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest return true, types.BlobInfo{ @@ -786,7 +790,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er } // Assume it's a file, since we're only calling this from a place that expects to read files. 
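The substitution branch above depends on the BlobInfoCache contract: the put path records a compressed-to-uncompressed digest pair once the diffID is known, and the reuse path later queries it to swap in an already-present uncompressed layer when options.CanSubstitute allows it. A self-contained sketch of that contract with a map-backed stand-in cache (not the real containers/image cache type):

```go
package main

import "fmt"

// digestCache is an illustrative stand-in for the BlobInfoCache used above:
// it remembers which uncompressed digest a compressed blob digest maps to.
type digestCache struct {
	uncompressed map[string]string
}

func newDigestCache() *digestCache {
	return &digestCache{uncompressed: make(map[string]string)}
}

// RecordDigestUncompressedPair mirrors the call made after a blob has been
// fully written and its diffID computed.
func (c *digestCache) RecordDigestUncompressedPair(compressed, uncompressed string) {
	c.uncompressed[compressed] = uncompressed
}

// UncompressedDigest mirrors the lookup used by the reuse path: given a
// (possibly compressed) digest, return the known uncompressed digest, if any.
func (c *digestCache) UncompressedDigest(d string) string {
	return c.uncompressed[d]
}

// tryReuse sketches the substitution rule seen above: reuse via the cache is
// only attempted when substitution is allowed or the size is already known.
func tryReuse(c *digestCache, digest string, size int64, canSubstitute bool, haveLayer func(string) bool) bool {
	if !canSubstitute && size == -1 {
		return false
	}
	u := c.UncompressedDigest(digest)
	if u == "" || u == digest {
		return false
	}
	return haveLayer(u)
}

func main() {
	c := newDigestCache()
	c.RecordDigestUncompressedPair("sha256:gzip-layer", "sha256:diffid")

	have := func(d string) bool { return d == "sha256:diffid" }
	fmt.Println(tryReuse(c, "sha256:gzip-layer", -1, true, have))  // true: substitution allowed
	fmt.Println(tryReuse(c, "sha256:gzip-layer", -1, false, have)) // false: size unknown, no substitution
}
```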
if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) + contents, err2 := os.ReadFile(filename) if err2 != nil { return nil, errors.Wrapf(err2, `reading blob from file %q`, filename) } @@ -1131,7 +1135,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t delete(dataBlobs, layerBlob.Digest) } for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) + v, err := os.ReadFile(s.filenames[blob]) if err != nil { return errors.Wrapf(err, "copying non-layer blob %q to image", blob) } @@ -1192,21 +1196,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } logrus.Debugf("saved image metadata %q", string(metadata)) } - // Set the reference's name on the image. We don't need to worry about avoiding duplicate - // values because SetNames() will deduplicate the list that we pass to it. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, name.String()) + // Add the reference's name to the image. We don't need to worry about avoiding duplicate + // values because AddNames() will deduplicate the list that we pass to it. + if name := s.imageRef.DockerReference(); name != nil { + if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil { + return errors.Wrapf(err, "adding names %v to image %q", name, img.ID) } - if len(oldNames) > 0 { - names = append(names, oldNames...) - } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) + logrus.Debugf("added name %q to image %q", name, img.ID) } commitSucceeded = true @@ -1261,6 +1257,11 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { return true // Yes, we want the unmodified manifest } +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (s *storageImageDestination) SupportsPutBlobPartial() bool { + return true +} + // PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { sizes := []int{} diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 694ad17bd1a..aedfdf5de65 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "runtime" "strings" @@ -87,7 +86,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - n, err := io.Copy(ioutil.Discard, reader) + n, err := io.Copy(io.Discard, reader) if err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } @@ -217,14 +216,14 @@ func (is *tarballImageSource) HasThreadSafeGetBlob() bool { func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We should only be asked about things in the manifest. Maybe the configuration blob.
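The Commit change above swaps the destructive SetNames call for the additive AddNames, so committing an image can no longer clobber names set by a concurrent writer. A small sketch of the additive semantics the new code relies on; the real implementation lives in containers/storage, and this stand-in only illustrates append-plus-deduplicate:

```go
package main

import "fmt"

// addNames sketches the AddNames semantics relied on above: append new names
// to the existing ones and deduplicate, instead of replacing the whole list
// the way SetNames did.
func addNames(existing, toAdd []string) []string {
	seen := make(map[string]struct{}, len(existing)+len(toAdd))
	out := make([]string, 0, len(existing)+len(toAdd))
	for _, n := range append(existing, toAdd...) {
		if _, ok := seen[n]; ok {
			continue
		}
		seen[n] = struct{}{}
		out = append(out, n)
	}
	return out
}

func main() {
	names := []string{"quay.io/crio/image:old"}
	names = addNames(names, []string{"quay.io/crio/image:new", "quay.io/crio/image:old"})
	fmt.Println(names) // [quay.io/crio/image:old quay.io/crio/image:new]
}
```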
if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. for i := range is.blobIDs { if blobinfo.Digest == is.blobIDs[i] { // We want to read that layer: open the file or memory block and hand it back. if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil } reader, err := os.Open(is.filenames[i]) if err != nil { diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go index d407c657fae..63d835530b5 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_transport.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_transport.go @@ -3,7 +3,7 @@ package tarball import ( "errors" "fmt" - "io/ioutil" + "io" "os" "strings" @@ -36,7 +36,7 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc filenames := strings.Split(reference, separator) for _, filename := range filenames { if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) + stdin, err = io.ReadAll(os.Stdin) if err != nil { return nil, fmt.Errorf("error buffering stdin: %v", err) } diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 2110a091d28..0bae8b2599d 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -12,7 +12,9 @@ import ( _ "github.com/containers/image/v5/oci/archive" _ "github.com/containers/image/v5/oci/layout" _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" + // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go "github.com/containers/image/v5/transports" diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index c98a6c6fda0..dcff8caf76b 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -561,6 +561,11 @@ type SystemContext struct { UserShortNameAliasConfPath string // If set, short-name resolution in pkg/shortnames must follow the specified mode ShortNameMode *ShortNameMode + // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and + // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce + // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this + // specific context. 
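Most of the mechanical churn in these vendored hunks is the Go 1.16 deprecation of io/ioutil. The replacement mapping used throughout the diff is standard library fact; a compact demonstration:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// The hunks above mechanically replace deprecated io/ioutil helpers with
// their Go 1.16+ equivalents. The full mapping, for reference:
//
//	ioutil.NopCloser -> io.NopCloser
//	ioutil.ReadAll   -> io.ReadAll
//	ioutil.Discard   -> io.Discard
//	ioutil.ReadFile  -> os.ReadFile
//	ioutil.WriteFile -> os.WriteFile
//	ioutil.TempFile  -> os.CreateTemp
//	ioutil.TempDir   -> os.MkdirTemp
func main() {
	rc := io.NopCloser(bytes.NewReader([]byte("blob")))
	data, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))

	f, err := os.CreateTemp("", "storage")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	fmt.Println("temp file:", f.Name())
	f.Close()
}
```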
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool // If not "", overrides the default path for the authentication file, but only new format files AuthFilePath string // if not "", overrides the default path for the authentication file, but with the legacy format; diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index ffb2a4ce259..6295b549305 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 17 + VersionMinor = 21 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..83b061c70b2 --- /dev/null +++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The libtrust Project Community Code of Conduct + +The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md new file mode 100644 index 00000000000..fab2c41e89b --- /dev/null +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the libtrust Project + +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/podman/v3/libpod/define/annotations.go b/vendor/github.com/containers/podman/v3/libpod/define/annotations.go deleted file mode 100644 index f6b1c06ead6..00000000000 --- a/vendor/github.com/containers/podman/v3/libpod/define/annotations.go +++ /dev/null @@ -1,68 +0,0 @@ -package define - -const ( - // InspectAnnotationCIDFile is used by Inspect to determine if a - // container ID file was created for the container. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationCIDFile = "io.podman.annotations.cid-file" - // InspectAnnotationAutoremove is used by Inspect to determine if a - // container will be automatically removed on exit. - // If an annotation with this key is found in the OCI spec and is one of - // the two supported boolean values (InspectResponseTrue and - // InspectResponseFalse) it will be used in the output of Inspect(). - InspectAnnotationAutoremove = "io.podman.annotations.autoremove" - // InspectAnnotationVolumesFrom is used by Inspect to identify - // containers whose volumes are are being used by this container. - // It is expected to be a comma-separated list of container names and/or - // IDs. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from" - // InspectAnnotationPrivileged is used by Inspect to identify containers - // which are privileged (IE, running with elevated privileges). 
- // It is expected to be a boolean, populated by one of - // InspectResponseTrue or InspectResponseFalse. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationPrivileged = "io.podman.annotations.privileged" - // InspectAnnotationPublishAll is used by Inspect to identify containers - // which have all the ports from their image published. - // It is expected to be a boolean, populated by one of - // InspectResponseTrue or InspectResponseFalse. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationPublishAll = "io.podman.annotations.publish-all" - // InspectAnnotationInit is used by Inspect to identify containers that - // mount an init binary in. - // It is expected to be a boolean, populated by one of - // InspectResponseTrue or InspectResponseFalse. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationInit = "io.podman.annotations.init" - // InspectAnnotationLabel is used by Inspect to identify containers with - // special SELinux-related settings. It is used to populate the output - // of the SecurityOpt setting. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationLabel = "io.podman.annotations.label" - // InspectAnnotationSeccomp is used by Inspect to identify containers - // with special Seccomp-related settings. It is used to populate the - // output of the SecurityOpt setting in Inspect. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationSeccomp = "io.podman.annotations.seccomp" - // InspectAnnotationApparmor is used by Inspect to identify containers - // with special Apparmor-related settings. It is used to populate the - // output of the SecurityOpt setting. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationApparmor = "io.podman.annotations.apparmor" - - // InspectResponseTrue is a boolean True response for an inspect - // annotation. - InspectResponseTrue = "TRUE" - // InspectResponseFalse is a boolean False response for an inspect - // annotation. - InspectResponseFalse = "FALSE" -) diff --git a/vendor/github.com/containers/podman/v3/libpod/events/logfile.go b/vendor/github.com/containers/podman/v3/libpod/events/logfile.go deleted file mode 100644 index e3f0ab8f033..00000000000 --- a/vendor/github.com/containers/podman/v3/libpod/events/logfile.go +++ /dev/null @@ -1,106 +0,0 @@ -package events - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/containers/podman/v3/pkg/util" - "github.com/containers/storage/pkg/lockfile" - "github.com/pkg/errors" -) - -// EventLogFile is the structure for event writing to a logfile. It contains the eventer -// options and the event itself. Methods for reading and writing are also defined from it. 
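The EventLogFile type deleted below serializes one event per line into a shared log file. A simplified, std-lib-only sketch of that append-a-JSON-line pattern; the real code additionally serializes writers across processes with a containers/storage lockfile at path + ".lock", which is omitted here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// event is an illustrative stand-in for the podman Event type.
type event struct {
	Type string `json:"type"`
	Name string `json:"name"`
}

// writeEvent sketches the Write path of the deleted EventLogFile: serialize
// the event to JSON and append it to the log file as a single line.
func writeEvent(path string, ev event) error {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o700)
	if err != nil {
		return err
	}
	defer f.Close()
	line, err := json.Marshal(ev)
	if err != nil {
		return err
	}
	_, err = fmt.Fprintf(f, "%s\n", line)
	return err
}

func main() {
	path := filepath.Join(os.TempDir(), "events.log")
	if err := writeEvent(path, event{Type: "container", Name: "demo"}); err != nil {
		panic(err)
	}
	fmt.Println("appended event to", path)
}
```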
-type EventLogFile struct { - options EventerOptions -} - -// Writes to the log file -func (e EventLogFile) Write(ee Event) error { - // We need to lock events file - lock, err := lockfile.GetLockfile(e.options.LogFilePath + ".lock") - if err != nil { - return err - } - lock.Lock() - defer lock.Unlock() - f, err := os.OpenFile(e.options.LogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0700) - if err != nil { - return err - } - defer f.Close() - eventJSONString, err := ee.ToJSONString() - if err != nil { - return err - } - if _, err := f.WriteString(fmt.Sprintf("%s\n", eventJSONString)); err != nil { - return err - } - return nil -} - -// Reads from the log file -func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { - defer close(options.EventChannel) - filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) - if err != nil { - return errors.Wrapf(err, "failed to parse event filters") - } - t, err := e.getTail(options) - if err != nil { - return err - } - if len(options.Until) > 0 { - untilTime, err := util.ParseInputTime(options.Until, false) - if err != nil { - return err - } - go func() { - time.Sleep(time.Until(untilTime)) - t.Stop() - }() - } - funcDone := make(chan bool) - copy := true - go func() { - select { - case <-funcDone: - // Do nothing - case <-ctx.Done(): - copy = false - t.Kill(errors.New("hangup by client")) - } - }() - for line := range t.Lines { - select { - case <-ctx.Done(): - // the consumer has cancelled - return nil - default: - // fallthrough - } - - event, err := newEventFromJSONString(line.Text) - if err != nil { - return err - } - switch event.Type { - case Image, Volume, Pod, System, Container, Network: - // no-op - default: - return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) - } - if copy && applyFilters(event, filterMap) { - options.EventChannel <- event - } - } - funcDone <- true - return nil -} - -// String returns a string representation of the logger -func (e EventLogFile) String() string { - return LogFile.String() -} diff --git a/vendor/github.com/containers/podman/v3/libpod/network/cni/config.go b/vendor/github.com/containers/podman/v3/libpod/network/cni/config.go deleted file mode 100644 index 3df155637b1..00000000000 --- a/vendor/github.com/containers/podman/v3/libpod/network/cni/config.go +++ /dev/null @@ -1,344 +0,0 @@ -// +build linux - -package cni - -import ( - "net" - "os" - - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/libpod/network/util" - pkgutil "github.com/containers/podman/v3/pkg/util" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" -) - -// NetworkCreate will take a partial filled Network and fill the -// missing fields. It creates the Network and returns the full Network. -func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) { - n.lock.Lock() - defer n.lock.Unlock() - err := n.loadNetworks() - if err != nil { - return types.Network{}, err - } - network, err := n.networkCreate(net, false) - if err != nil { - return types.Network{}, err - } - // add the new network to the map - n.networks[network.libpodNet.Name] = network - return *network.libpodNet, nil -} - -// networkCreate will fill out the given network struct and return the new network entry. -// If defaultNet is true it will not validate against used subnets and it will not write the cni config to disk. 
-func (n *cniNetwork) networkCreate(newNetwork types.Network, defaultNet bool) (*network, error) { - // if no driver is set use the default one - if newNetwork.Driver == "" { - newNetwork.Driver = types.DefaultNetworkDriver - } - - // FIXME: Should we use a different type for network create without the ID field? - // the caller is not allowed to set a specific ID - if newNetwork.ID != "" { - return nil, errors.Wrap(define.ErrInvalidArg, "ID can not be set for network create") - } - - if newNetwork.Labels == nil { - newNetwork.Labels = map[string]string{} - } - if newNetwork.Options == nil { - newNetwork.Options = map[string]string{} - } - if newNetwork.IPAMOptions == nil { - newNetwork.IPAMOptions = map[string]string{} - } - - var name string - var err error - // validate the name when given - if newNetwork.Name != "" { - if !define.NameRegex.MatchString(newNetwork.Name) { - return nil, errors.Wrapf(define.RegexError, "network name %s invalid", newNetwork.Name) - } - if _, ok := n.networks[newNetwork.Name]; ok { - return nil, errors.Wrapf(define.ErrNetworkExists, "network name %s already used", newNetwork.Name) - } - } else { - name, err = n.getFreeDeviceName() - if err != nil { - return nil, err - } - newNetwork.Name = name - } - - // Only get the used networks for validation if we do not create the default network. - // The default network should not be validated against used subnets, we have to ensure - // that this network can always be created even when a subnet is already used on the host. - // This could happen if you run a container on this net, then the cni interface will be - // created on the host and "block" this subnet from being used again. - // Therefore the next podman command tries to create the default net again and it would - // fail because it thinks the network is used on the host. - var usedNetworks []*net.IPNet - if !defaultNet { - usedNetworks, err = n.getUsedSubnets() - if err != nil { - return nil, err - } - } - - switch newNetwork.Driver { - case types.BridgeNetworkDriver: - // if the name was created with getFreeDeviceName set the interface to it as well - if name != "" && newNetwork.NetworkInterface == "" { - newNetwork.NetworkInterface = name - } - err = n.createBridge(&newNetwork, usedNetworks) - if err != nil { - return nil, err - } - case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: - err = createIPMACVLAN(&newNetwork) - if err != nil { - return nil, err - } - default: - return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) - } - - for i := range newNetwork.Subnets { - err := validateSubnet(&newNetwork.Subnets[i], !newNetwork.Internal, usedNetworks) - if err != nil { - return nil, err - } - if util.IsIPv6(newNetwork.Subnets[i].Subnet.IP) { - newNetwork.IPv6Enabled = true - } - } - - // generate the network ID - newNetwork.ID = getNetworkIDFromName(newNetwork.Name) - - // FIXME: Should this be a hard error? - if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { - logrus.Warnf("dnsname and internal networks are incompatible. dnsname plugin not configured for network %s", newNetwork.Name) - newNetwork.DNSEnabled = false - } - - cniConf, path, err := n.createCNIConfigListFromNetwork(&newNetwork, !defaultNet) - if err != nil { - return nil, err - } - return &network{cniNet: cniConf, libpodNet: &newNetwork, filename: path}, nil -} - -// NetworkRemove will remove the Network with the given name or ID. -// It does not ensure that the network is unused. 
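networkCreate above validates the name against define.NameRegex and finishes by deriving the network ID from the name via getNetworkIDFromName. A plausible sketch of that pair of steps; both the regex and the sha256 hash choice are assumptions for illustration, not confirmed by this hunk:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"regexp"
)

// nameRegex is an illustrative stand-in for define.NameRegex; the exact
// pattern podman uses is an assumption here.
var nameRegex = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)

// networkIDFromName sketches what getNetworkIDFromName (called above) is
// assumed to do: derive a stable hex ID from the network name. The hash
// choice is an assumption, not a guaranteed match with the vendored code.
func networkIDFromName(name string) string {
	sum := sha256.Sum256([]byte(name))
	return fmt.Sprintf("%x", sum)
}

func validateName(name string) error {
	if !nameRegex.MatchString(name) {
		return fmt.Errorf("network name %s invalid", name)
	}
	return nil
}

func main() {
	name := "podman"
	if err := validateName(name); err != nil {
		panic(err)
	}
	fmt.Println(name, "->", networkIDFromName(name))
}
```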
-func (n *cniNetwork) NetworkRemove(nameOrID string) error { - n.lock.Lock() - defer n.lock.Unlock() - err := n.loadNetworks() - if err != nil { - return err - } - - network, err := n.getNetwork(nameOrID) - if err != nil { - return err - } - - // Removing the default network is not allowed. - if network.libpodNet.Name == n.defaultNetwork { - return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) - } - - // Remove the bridge network interface on the host. - if network.libpodNet.Driver == types.BridgeNetworkDriver { - link, err := netlink.LinkByName(network.libpodNet.NetworkInterface) - if err == nil { - err = netlink.LinkDel(link) - // only log the error, it is not fatal - if err != nil { - logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err) - } - } - } - - file := network.filename - delete(n.networks, network.libpodNet.Name) - - // make sure to not error for ErrNotExist - if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - return nil -} - -// NetworkList will return all known Networks. Optionally you can -// supply a list of filter functions. Only if a network matches all -// functions it is returned. -func (n *cniNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) { - n.lock.Lock() - defer n.lock.Unlock() - err := n.loadNetworks() - if err != nil { - return nil, err - } - - networks := make([]types.Network, 0, len(n.networks)) -outer: - for _, net := range n.networks { - for _, filter := range filters { - // All filters have to match, if one does not match we can skip to the next network. - if !filter(*net.libpodNet) { - continue outer - } - } - networks = append(networks, *net.libpodNet) - } - return networks, nil -} - -// NetworkInspect will return the Network with the given name or ID. 
-func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) { - n.lock.Lock() - defer n.lock.Unlock() - err := n.loadNetworks() - if err != nil { - return types.Network{}, err - } - - network, err := n.getNetwork(nameOrID) - if err != nil { - return types.Network{}, err - } - return *network.libpodNet, nil -} - -func createIPMACVLAN(network *types.Network) error { - if network.Internal { - return errors.New("internal is not supported with macvlan") - } - if network.NetworkInterface != "" { - interfaceNames, err := util.GetLiveNetworkNames() - if err != nil { - return err - } - if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { - return errors.Errorf("parent interface %s does not exists", network.NetworkInterface) - } - } - if len(network.Subnets) == 0 { - network.IPAMOptions["driver"] = types.DHCPIPAMDriver - } else { - network.IPAMOptions["driver"] = types.HostLocalIPAMDriver - } - return nil -} - -func (n *cniNetwork) createBridge(network *types.Network, usedNetworks []*net.IPNet) error { - if network.NetworkInterface != "" { - bridges := n.getBridgeInterfaceNames() - if pkgutil.StringInSlice(network.NetworkInterface, bridges) { - return errors.Errorf("bridge name %s already in use", network.NetworkInterface) - } - if !define.NameRegex.MatchString(network.NetworkInterface) { - return errors.Wrapf(define.RegexError, "bridge name %s invalid", network.NetworkInterface) - } - } else { - var err error - network.NetworkInterface, err = n.getFreeDeviceName() - if err != nil { - return err - } - } - - if len(network.Subnets) == 0 { - freeSubnet, err := n.getFreeIPv4NetworkSubnet(usedNetworks) - if err != nil { - return err - } - network.Subnets = append(network.Subnets, *freeSubnet) - } - // ipv6 enabled means dual stack, check if we already have - // a ipv4 or ipv6 subnet and add one if not. - if network.IPv6Enabled { - ipv4 := false - ipv6 := false - for _, subnet := range network.Subnets { - if util.IsIPv6(subnet.Subnet.IP) { - ipv6 = true - } - if util.IsIPv4(subnet.Subnet.IP) { - ipv4 = true - } - } - if !ipv4 { - freeSubnet, err := n.getFreeIPv4NetworkSubnet(usedNetworks) - if err != nil { - return err - } - network.Subnets = append(network.Subnets, *freeSubnet) - } - if !ipv6 { - freeSubnet, err := n.getFreeIPv6NetworkSubnet(usedNetworks) - if err != nil { - return err - } - network.Subnets = append(network.Subnets, *freeSubnet) - } - } - network.IPAMOptions["driver"] = types.HostLocalIPAMDriver - return nil -} - -// validateSubnet will validate a given Subnet. It checks if the -// given gateway and lease range are part of this subnet. If the -// gateway is empty and addGateway is true it will get the first -// available ip in the subnet assigned. -func validateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) error { - if s == nil { - return errors.New("subnet is nil") - } - if s.Subnet.IP == nil { - return errors.New("subnet ip is nil") - } - - // Reparse to ensure subnet is valid. - // Do not use types.ParseCIDR() because we want the ip to be - // the network address and not a random ip in the subnet. 
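The re-parse step that follows relies on a standard library behavior worth seeing concretely: net.ParseCIDR returns both the literal IP and the masked network, and keeping only the latter normalizes the subnet to its network address, which is exactly why the comment warns against types.ParseCIDR here:

```go
package main

import (
	"fmt"
	"net"
)

// net.ParseCIDR returns both the literal IP and the masked *net.IPNet; the
// validateSubnet code keeps only the latter so that "10.88.1.5/16"
// normalizes to the network address "10.88.0.0/16".
func main() {
	ip, subnet, err := net.ParseCIDR("10.88.1.5/16")
	if err != nil {
		panic(err)
	}
	fmt.Println("literal ip:", ip)    // 10.88.1.5
	fmt.Println("network:", subnet)   // 10.88.0.0/16
	fmt.Println("contains gateway:", subnet.Contains(net.ParseIP("10.88.0.1"))) // true
}
```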
- _, net, err := net.ParseCIDR(s.Subnet.String()) - if err != nil { - return errors.Wrap(err, "subnet invalid") - } - - // check that the new subnet does not conflict with existing ones - if util.NetworkIntersectsWithNetworks(net, usedNetworks) { - return errors.Errorf("subnet %s is already used on the host or by another config", net.String()) - } - - s.Subnet = types.IPNet{IPNet: *net} - if s.Gateway != nil { - if !s.Subnet.Contains(s.Gateway) { - return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet) - } - } else if addGateway { - ip, err := util.FirstIPInSubnet(net) - if err != nil { - return err - } - s.Gateway = ip - } - if s.LeaseRange != nil { - if s.LeaseRange.StartIP != nil && !s.Subnet.Contains(s.LeaseRange.StartIP) { - return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet) - } - if s.LeaseRange.EndIP != nil && !s.Subnet.Contains(s.LeaseRange.EndIP) { - return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet) - } - } - return nil -} diff --git a/vendor/github.com/containers/podman/v3/libpod/network/types/const.go b/vendor/github.com/containers/podman/v3/libpod/network/types/const.go deleted file mode 100644 index 916c6e6bf7e..00000000000 --- a/vendor/github.com/containers/podman/v3/libpod/network/types/const.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -const ( - // BridgeNetworkDriver defines the bridge driver - BridgeNetworkDriver = "bridge" - // DefaultNetworkDriver is the default network type used - DefaultNetworkDriver = BridgeNetworkDriver - // MacVLANNetworkDriver defines the macvlan driver - MacVLANNetworkDriver = "macvlan" - // MacVLANNetworkDriver defines the macvlan driver - IPVLANNetworkDriver = "ipvlan" - - // IPAM drivers - // HostLocalIPAMDriver store the ip - HostLocalIPAMDriver = "host-local" - // DHCPIPAMDriver get subnet and ip from dhcp server - DHCPIPAMDriver = "dhcp" - - // DefaultSubnet is the name that will be used for the default CNI network. - DefaultNetworkName = "podman" - // DefaultSubnet is the subnet that will be used for the default CNI network. - DefaultSubnet = "10.88.0.0/16" -) diff --git a/vendor/github.com/containers/podman/v3/libpod/network/util/ip.go b/vendor/github.com/containers/podman/v3/libpod/network/util/ip.go deleted file mode 100644 index b2ba9273567..00000000000 --- a/vendor/github.com/containers/podman/v3/libpod/network/util/ip.go +++ /dev/null @@ -1,113 +0,0 @@ -package util - -import ( - "crypto/rand" - "net" - - "github.com/pkg/errors" -) - -// IsIPv6 returns true if netIP is IPv6. -func IsIPv6(netIP net.IP) bool { - return netIP != nil && netIP.To4() == nil -} - -// IsIPv4 returns true if netIP is IPv4. 
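The containment checks above (gateway and lease range must sit inside the subnet) and the To4-based IPv4/IPv6 helpers being deleted here are easy to mirror with the standard library; a self-contained sketch:

```go
package main

import (
	"fmt"
	"net"
)

// isIPv4/isIPv6 mirror the To4()-based helpers deleted in this hunk: an IP
// is treated as IPv4 exactly when it has a 4-byte form.
func isIPv4(ip net.IP) bool { return ip != nil && ip.To4() != nil }
func isIPv6(ip net.IP) bool { return ip != nil && ip.To4() == nil }

// checkRange mirrors the gateway and lease-range validation above: every
// configured address must fall inside the subnet.
func checkRange(subnet *net.IPNet, ips ...net.IP) error {
	for _, ip := range ips {
		if ip != nil && !subnet.Contains(ip) {
			return fmt.Errorf("ip %s not in subnet %s", ip, subnet)
		}
	}
	return nil
}

func main() {
	_, subnet, _ := net.ParseCIDR("10.88.0.0/16")
	gw := net.ParseIP("10.88.0.1")
	fmt.Println(isIPv4(gw), isIPv6(net.ParseIP("fd00::1")))           // true true
	fmt.Println(checkRange(subnet, gw, net.ParseIP("10.88.255.254"))) // <nil>
	fmt.Println(checkRange(subnet, net.ParseIP("192.168.1.1")))       // error
}
```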
-func IsIPv4(netIP net.IP) bool { - return netIP != nil && netIP.To4() != nil -} - -func incByte(subnet *net.IPNet, idx int, shift uint) error { - if idx < 0 { - return errors.New("no more subnets left") - } - if subnet.IP[idx] == 255 { - subnet.IP[idx] = 0 - return incByte(subnet, idx-1, 0) - } - subnet.IP[idx] += 1 << shift - return nil -} - -// NextSubnet returns subnet incremented by 1 -func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) { - newSubnet := &net.IPNet{ - IP: subnet.IP, - Mask: subnet.Mask, - } - ones, bits := newSubnet.Mask.Size() - if ones == 0 { - return nil, errors.Errorf("%s has only one subnet", subnet.String()) - } - zeroes := uint(bits - ones) - shift := zeroes % 8 - idx := ones/8 - 1 - if idx < 0 { - idx = 0 - } - if err := incByte(newSubnet, idx, shift); err != nil { - return nil, err - } - return newSubnet, nil -} - -// LastIPInSubnet gets the last IP in a subnet -func LastIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer - // re-parse to ensure clean network address - _, cidr, err := net.ParseCIDR(addr.String()) - if err != nil { - return nil, err - } - - ones, bits := cidr.Mask.Size() - if ones == bits { - return cidr.IP, nil - } - for i := range cidr.IP { - cidr.IP[i] = cidr.IP[i] | ^cidr.Mask[i] - } - return cidr.IP, nil -} - -// FirstIPInSubnet gets the first IP in a subnet -func FirstIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer - // re-parse to ensure clean network address - _, cidr, err := net.ParseCIDR(addr.String()) - if err != nil { - return nil, err - } - ones, bits := cidr.Mask.Size() - if ones == bits { - return cidr.IP, nil - } - cidr.IP[len(cidr.IP)-1]++ - return cidr.IP, nil -} - -func NetworkIntersectsWithNetworks(n *net.IPNet, networklist []*net.IPNet) bool { - for _, nw := range networklist { - if networkIntersect(n, nw) { - return true - } - } - return false -} - -func networkIntersect(n1, n2 *net.IPNet) bool { - return n2.Contains(n1.IP) || n1.Contains(n2.IP) -} - -// GetRandomIPv6Subnet returns a random internal ipv6 subnet as described in RFC3879. -func GetRandomIPv6Subnet() (net.IPNet, error) { - ip := make(net.IP, 8, net.IPv6len) - // read 8 random bytes - _, err := rand.Read(ip) - if err != nil { - return net.IPNet{}, nil - } - // first byte must be FD as per RFC3879 - ip[0] = 0xfd - // add 8 zero bytes - ip = append(ip, make([]byte, 8)...) - return net.IPNet{IP: ip, Mask: net.CIDRMask(64, 128)}, nil -} diff --git a/vendor/github.com/containers/podman/v3/libpod/oci_util.go b/vendor/github.com/containers/podman/v3/libpod/oci_util.go deleted file mode 100644 index c1afc0d203c..00000000000 --- a/vendor/github.com/containers/podman/v3/libpod/oci_util.go +++ /dev/null @@ -1,154 +0,0 @@ -package libpod - -import ( - "fmt" - "net" - "os" - "regexp" - "strings" - "time" - - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Timeout before declaring that runtime has failed to kill a given -// container -const killContainerTimeout = 5 * time.Second - -// ociError is used to parse the OCI runtime JSON log. 
It is not part of the -// OCI runtime specifications, it follows what runc does -type ociError struct { - Level string `json:"level,omitempty"` - Time string `json:"time,omitempty"` - Msg string `json:"msg,omitempty"` -} - -// Create systemd unit name for cgroup scopes -func createUnitName(prefix string, name string) string { - return fmt.Sprintf("%s-%s.scope", prefix, name) -} - -// Bind ports to keep them closed on the host -func bindPorts(ports []types.OCICNIPortMapping) ([]*os.File, error) { - var files []*os.File - notifySCTP := false - for _, i := range ports { - isV6 := net.ParseIP(i.HostIP).To4() == nil - if i.HostIP == "" { - isV6 = false - } - switch i.Protocol { - case "udp": - var ( - addr *net.UDPAddr - err error - ) - if isV6 { - addr, err = net.ResolveUDPAddr("udp6", fmt.Sprintf("[%s]:%d", i.HostIP, i.HostPort)) - } else { - addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort)) - } - if err != nil { - return nil, errors.Wrapf(err, "cannot resolve the UDP address") - } - - proto := "udp4" - if isV6 { - proto = "udp6" - } - server, err := net.ListenUDP(proto, addr) - if err != nil { - return nil, errors.Wrapf(err, "cannot listen on the UDP port") - } - f, err := server.File() - if err != nil { - return nil, errors.Wrapf(err, "cannot get file for UDP socket") - } - files = append(files, f) - // close the listener - // note that this does not affect the fd, see the godoc for server.File() - err = server.Close() - if err != nil { - logrus.Warnf("Failed to close connection: %v", err) - } - - case "tcp": - var ( - addr *net.TCPAddr - err error - ) - if isV6 { - addr, err = net.ResolveTCPAddr("tcp6", fmt.Sprintf("[%s]:%d", i.HostIP, i.HostPort)) - } else { - addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort)) - } - if err != nil { - return nil, errors.Wrapf(err, "cannot resolve the TCP address") - } - - proto := "tcp4" - if isV6 { - proto = "tcp6" - } - server, err := net.ListenTCP(proto, addr) - if err != nil { - return nil, errors.Wrapf(err, "cannot listen on the TCP port") - } - f, err := server.File() - if err != nil { - return nil, errors.Wrapf(err, "cannot get file for TCP socket") - } - files = append(files, f) - // close the listener - // note that this does not affect the fd, see the godoc for server.File() - err = server.Close() - if err != nil { - logrus.Warnf("Failed to close connection: %v", err) - } - - case "sctp": - if !notifySCTP { - notifySCTP = true - logrus.Warnf("Port reservation for SCTP is not supported") - } - default: - return nil, fmt.Errorf("unknown protocol %s", i.Protocol) - } - } - return files, nil -} - -func getOCIRuntimeError(runtimeMsg string) error { - includeFullOutput := logrus.GetLevel() == logrus.DebugLevel - - if match := regexp.MustCompile("(?i).*permission denied.*|.*operation not permitted.*").FindString(runtimeMsg); match != "" { - errStr := match - if includeFullOutput { - errStr = runtimeMsg - } - return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s", strings.Trim(errStr, "\n")) - } - if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" { - errStr := match - if includeFullOutput { - errStr = runtimeMsg - } - return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s", strings.Trim(errStr, "\n")) - } - if match := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`").FindString(runtimeMsg); match != "" { - errStr := match - if includeFullOutput { - errStr = runtimeMsg - } - if 
strings.HasSuffix(match, "/exec`") { - return errors.Wrapf(define.ErrSetSecurityAttribute, "%s", strings.Trim(errStr, "\n")) - } else if strings.HasSuffix(match, "/current`") { - return errors.Wrapf(define.ErrGetSecurityAttribute, "%s", strings.Trim(errStr, "\n")) - } - return errors.Wrapf(define.ErrSecurityAttribute, "%s", strings.Trim(errStr, "\n")) - } - return errors.Wrapf(define.ErrOCIRuntime, "%s", strings.Trim(runtimeMsg, "\n")) -} diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/manifest.go b/vendor/github.com/containers/podman/v3/pkg/domain/entities/manifest.go deleted file mode 100644 index 3f89e4d30b2..00000000000 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/manifest.go +++ /dev/null @@ -1,36 +0,0 @@ -package entities - -import "github.com/containers/image/v5/types" - -// TODO: add comments to *all* types and fields. - -type ManifestCreateOptions struct { - All bool `schema:"all"` -} - -// swagger:model ManifestAddOpts -type ManifestAddOptions struct { - All bool `json:"all" schema:"all"` - Annotation []string `json:"annotation" schema:"annotation"` - Arch string `json:"arch" schema:"arch"` - Authfile string `json:"-" schema:"-"` - CertDir string `json:"-" schema:"-"` - Features []string `json:"features" schema:"features"` - Images []string `json:"images" schema:"images"` - OS string `json:"os" schema:"os"` - OSVersion string `json:"os_version" schema:"os_version"` - Password string `json:"-" schema:"-"` - SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` - Username string `json:"-" schema:"-"` - Variant string `json:"variant" schema:"variant"` -} - -type ManifestAnnotateOptions struct { - Annotation []string `json:"annotation"` - Arch string `json:"arch" schema:"arch"` - Features []string `json:"features" schema:"features"` - OS string `json:"os" schema:"os"` - OSFeatures []string `json:"os_features" schema:"os_features"` - OSVersion string `json:"os_version" schema:"os_version"` - Variant string `json:"variant" schema:"variant"` -} diff --git a/vendor/github.com/containers/podman/v3/pkg/env/env_unsupported.go b/vendor/github.com/containers/podman/v3/pkg/env/env_unsupported.go deleted file mode 100644 index a71c2956df2..00000000000 --- a/vendor/github.com/containers/podman/v3/pkg/env/env_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!darwin - -package env - -func ParseSlice(s []string) (map[string]string, error) { - m := make(map[string]string) - return m, nil -} diff --git a/vendor/github.com/containers/podman/v3/pkg/rootlessport/rootlessport_linux.go b/vendor/github.com/containers/podman/v3/pkg/rootlessport/rootlessport_linux.go deleted file mode 100644 index 37fb7ce7938..00000000000 --- a/vendor/github.com/containers/podman/v3/pkg/rootlessport/rootlessport_linux.go +++ /dev/null @@ -1,377 +0,0 @@ -// +build linux - -// Package rootlessport provides reexec for RootlessKit-based port forwarder. -// -// init() contains reexec.Register() for ReexecKey . -// -// The reexec requires Config to be provided via stdin. -// -// The reexec writes human-readable error message on stdout on error. -// -// Debug log is printed on stderr. 
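The package comment above describes the rootlessport handshake: the parent serializes a Config to JSON, writes it to the reexec'd child's stdin, and then closes stdin so the child's read returns. A self-contained sketch of that convention, using cat as a stand-in for the reexec'd child (assumes a Unix-like environment; the field set is abbreviated):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// config is an illustrative, abbreviated stand-in for the rootlessport
// Config: the parent serializes it to JSON, writes it to the child's stdin,
// and closes stdin to signal end-of-config.
type config struct {
	NetNSPath   string `json:"NetNSPath"`
	ExitFD      int    `json:"ExitFD"`
	ReadyFD     int    `json:"ReadyFD"`
	ContainerID string `json:"ContainerID"`
}

func main() {
	// "cat" stands in for the reexec'd child; it just echoes the config back.
	cmd := exec.Command("cat")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		panic(err)
	}
	out, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	cfg := config{NetNSPath: "/run/netns/demo", ExitFD: 3, ReadyFD: 4, ContainerID: "abc123"}
	if err := json.NewEncoder(stdin).Encode(&cfg); err != nil {
		panic(err)
	}
	stdin.Close() // closing stdin signals end-of-config to the child

	var echoed config
	if err := json.NewDecoder(out).Decode(&echoed); err != nil {
		panic(err)
	}
	if err := cmd.Wait(); err != nil {
		panic(err)
	}
	fmt.Printf("child received config for container %s\n", echoed.ContainerID)
}
```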
-package rootlessport - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - - "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/storage/pkg/reexec" - "github.com/pkg/errors" - rkport "github.com/rootless-containers/rootlesskit/pkg/port" - rkbuiltin "github.com/rootless-containers/rootlesskit/pkg/port/builtin" - rkportutil "github.com/rootless-containers/rootlesskit/pkg/port/portutil" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const ( - // ReexecKey is the reexec key for the parent process. - ReexecKey = "containers-rootlessport" - // reexecChildKey is used internally for the second reexec - reexecChildKey = "containers-rootlessport-child" - reexecChildEnvOpaque = "_CONTAINERS_ROOTLESSPORT_CHILD_OPAQUE" -) - -// Config needs to be provided to the process via stdin as a JSON string. -// stdin needs to be closed after the message has been written. -type Config struct { - Mappings []types.OCICNIPortMapping - NetNSPath string - ExitFD int - ReadyFD int - TmpDir string - ChildIP string - ContainerID string - RootlessCNI bool -} - -func init() { - reexec.Register(ReexecKey, func() { - if err := parent(); err != nil { - fmt.Println(err) - os.Exit(1) - } - }) - reexec.Register(reexecChildKey, func() { - if err := child(); err != nil { - fmt.Println(err) - os.Exit(1) - } - }) -} - -func loadConfig(r io.Reader) (*Config, io.ReadCloser, io.WriteCloser, error) { - stdin, err := ioutil.ReadAll(r) - if err != nil { - return nil, nil, nil, err - } - var cfg Config - if err := json.Unmarshal(stdin, &cfg); err != nil { - return nil, nil, nil, err - } - if cfg.NetNSPath == "" { - return nil, nil, nil, errors.New("missing NetNSPath") - } - if cfg.ExitFD <= 0 { - return nil, nil, nil, errors.New("missing ExitFD") - } - exitFile := os.NewFile(uintptr(cfg.ExitFD), "exitfile") - if exitFile == nil { - return nil, nil, nil, errors.New("invalid ExitFD") - } - if cfg.ReadyFD <= 0 { - return nil, nil, nil, errors.New("missing ReadyFD") - } - readyFile := os.NewFile(uintptr(cfg.ReadyFD), "readyfile") - if readyFile == nil { - return nil, nil, nil, errors.New("invalid ReadyFD") - } - return &cfg, exitFile, readyFile, nil -} - -func parent() error { - // load config from stdin - cfg, exitR, readyW, err := loadConfig(os.Stdin) - if err != nil { - return err - } - - socketDir := filepath.Join(cfg.TmpDir, "rp") - err = os.MkdirAll(socketDir, 0700) - if err != nil { - return err - } - - // create the parent driver - stateDir, err := ioutil.TempDir(cfg.TmpDir, "rootlessport") - if err != nil { - return err - } - defer os.RemoveAll(stateDir) - driver, err := rkbuiltin.NewParentDriver(&logrusWriter{prefix: "parent: "}, stateDir) - if err != nil { - return err - } - initComplete := make(chan struct{}) - quit := make(chan struct{}) - errCh := make(chan error) - // start the parent driver. initComplete will be closed when the child connected to the parent. 
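The parent() function below coordinates three event sources in a select loop: driver readiness (initComplete closed), driver failure, and child failure. A stripped-down sketch of that orchestration pattern with plain channels:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForInit sketches the coordination used by parent(): block until the
// driver signals readiness (initComplete closed), but bail out early if
// either the driver or the child reports an error first. Nil errors are
// ignored, matching the loop in the deleted code.
func waitForInit(initComplete <-chan struct{}, driverErr, childErr <-chan error) error {
	for {
		select {
		case <-initComplete:
			return nil
		case err := <-driverErr:
			if err != nil {
				return fmt.Errorf("parent driver: %w", err)
			}
		case err := <-childErr:
			if err != nil {
				return fmt.Errorf("child: %w", err)
			}
		}
	}
}

func main() {
	initComplete := make(chan struct{})
	driverErr := make(chan error, 1)
	childErr := make(chan error, 1)

	go func() {
		time.Sleep(10 * time.Millisecond)
		close(initComplete) // driver is ready
	}()
	fmt.Println("ready:", waitForInit(initComplete, driverErr, childErr))

	childErr <- errors.New("child exited early")
	fmt.Println("failure:", waitForInit(make(chan struct{}), driverErr, childErr))
}
```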
- logrus.Infof("Starting parent driver") - go func() { - driverErr := driver.RunParentDriver(initComplete, quit, nil) - if driverErr != nil { - logrus.WithError(driverErr).Warn("Parent driver exited") - } - errCh <- driverErr - close(errCh) - }() - opaque := driver.OpaqueForChild() - logrus.Infof("Opaque=%+v", opaque) - opaqueJSON, err := json.Marshal(opaque) - if err != nil { - return err - } - childQuitR, childQuitW, err := os.Pipe() - if err != nil { - return err - } - defer func() { - // stop the child - logrus.Info("Stopping child driver") - if err := childQuitW.Close(); err != nil { - logrus.WithError(err).Warn("Unable to close childQuitW") - } - }() - - // reexec the child process in the child netns - cmd := exec.Command("/proc/self/exe") - cmd.Args = []string{reexecChildKey} - cmd.Stdin = childQuitR - cmd.Stdout = &logrusWriter{prefix: "child"} - cmd.Stderr = cmd.Stdout - cmd.Env = append(os.Environ(), reexecChildEnvOpaque+"="+string(opaqueJSON)) - childNS, err := ns.GetNS(cfg.NetNSPath) - if err != nil { - return err - } - if err := childNS.Do(func(_ ns.NetNS) error { - logrus.Infof("Starting child driver in child netns (%q %v)", cmd.Path, cmd.Args) - return cmd.Start() - }); err != nil { - return err - } - - childErrCh := make(chan error) - go func() { - err := cmd.Wait() - childErrCh <- err - close(childErrCh) - }() - - defer func() { - if err := unix.Kill(cmd.Process.Pid, unix.SIGTERM); err != nil { - logrus.WithError(err).Warn("Kill child process") - } - }() - - logrus.Info("Waiting for initComplete") - // wait for the child to connect to the parent -outer: - for { - select { - case <-initComplete: - logrus.Infof("initComplete is closed; parent and child established the communication channel") - break outer - case err := <-childErrCh: - if err != nil { - return err - } - case err := <-errCh: - if err != nil { - return err - } - } - } - - defer func() { - logrus.Info("Stopping parent driver") - quit <- struct{}{} - if err := <-errCh; err != nil { - logrus.WithError(err).Warn("Parent driver returned error on exit") - } - }() - - // let parent expose ports - logrus.Infof("Exposing ports %v", cfg.Mappings) - if err := exposePorts(driver, cfg.Mappings, cfg.ChildIP); err != nil { - return err - } - - // we only need to have a socket to reload ports when we run under rootless cni - if cfg.RootlessCNI { - socketfile := filepath.Join(socketDir, cfg.ContainerID) - // make sure to remove the file if it exists to prevent EADDRINUSE - _ = os.Remove(socketfile) - // workaround to bypass the 108 char socket path limit - // open the fd and use the path to the fd as bind argument - fd, err := unix.Open(socketDir, unix.O_PATH, 0) - if err != nil { - return err - } - socket, err := net.ListenUnix("unixpacket", &net.UnixAddr{Name: fmt.Sprintf("/proc/self/fd/%d/%s", fd, cfg.ContainerID), Net: "unixpacket"}) - if err != nil { - return err - } - err = unix.Close(fd) - // remove the socket file on exit - defer os.Remove(socketfile) - if err != nil { - logrus.Warnf("Failed to close the socketDir fd: %v", err) - } - defer socket.Close() - go serve(socket, driver) - } - - logrus.Info("Ready") - - // https://github.com/containers/podman/issues/11248 - // Copy /dev/null to stdout and stderr to prevent SIGPIPE errors - if f, err := os.OpenFile("/dev/null", os.O_WRONLY, 0755); err == nil { - unix.Dup2(int(f.Fd()), 1) // nolint:errcheck - unix.Dup2(int(f.Fd()), 2) // nolint:errcheck - f.Close() - } - // write and close ReadyFD (convention is same as slirp4netns --ready-fd) - if _, err := 
readyW.Write([]byte("1")); err != nil { - return err - } - if err := readyW.Close(); err != nil { - return err - } - - // wait for ExitFD to be closed - logrus.Info("Waiting for exitfd to be closed") - if _, err := ioutil.ReadAll(exitR); err != nil { - return err - } - return nil -} - -func serve(listener net.Listener, pm rkport.Manager) { - for { - conn, err := listener.Accept() - if err != nil { - // we cannot log this error, stderr is already closed - continue - } - ctx := context.TODO() - err = handler(ctx, conn, pm) - if err != nil { - conn.Write([]byte(err.Error())) - } else { - conn.Write([]byte("OK")) - } - conn.Close() - } -} - -func handler(ctx context.Context, conn io.Reader, pm rkport.Manager) error { - var childIP string - dec := json.NewDecoder(conn) - err := dec.Decode(&childIP) - if err != nil { - return errors.Wrap(err, "rootless port failed to decode ports") - } - portStatus, err := pm.ListPorts(ctx) - if err != nil { - return errors.Wrap(err, "rootless port failed to list ports") - } - for _, status := range portStatus { - err = pm.RemovePort(ctx, status.ID) - if err != nil { - return errors.Wrap(err, "rootless port failed to remove port") - } - } - // add the ports with the new child IP - for _, status := range portStatus { - // set the new child IP - status.Spec.ChildIP = childIP - _, err = pm.AddPort(ctx, status.Spec) - if err != nil { - return errors.Wrap(err, "rootless port failed to add port") - } - } - return nil -} - -func exposePorts(pm rkport.Manager, portMappings []types.OCICNIPortMapping, childIP string) error { - ctx := context.TODO() - for _, i := range portMappings { - hostIP := i.HostIP - if hostIP == "" { - hostIP = "0.0.0.0" - } - spec := rkport.Spec{ - Proto: i.Protocol, - ParentIP: hostIP, - ParentPort: int(i.HostPort), - ChildPort: int(i.ContainerPort), - ChildIP: childIP, - } - if err := rkportutil.ValidatePortSpec(spec, nil); err != nil { - return err - } - if _, err := pm.AddPort(ctx, spec); err != nil { - return err - } - } - return nil -} - -func child() error { - // load the config from the parent - var opaque map[string]string - if err := json.Unmarshal([]byte(os.Getenv(reexecChildEnvOpaque)), &opaque); err != nil { - return err - } - - // start the child driver - quit := make(chan struct{}) - errCh := make(chan error) - go func() { - d := rkbuiltin.NewChildDriver(os.Stderr) - dErr := d.RunChildDriver(opaque, quit) - errCh <- dErr - }() - defer func() { - logrus.Info("Stopping child driver") - quit <- struct{}{} - if err := <-errCh; err != nil { - logrus.WithError(err).Warn("Child driver returned error on exit") - } - }() - - // wait for stdin to be closed - if _, err := ioutil.ReadAll(os.Stdin); err != nil { - return err - } - return nil -} - -type logrusWriter struct { - prefix string -} - -func (w *logrusWriter) Write(p []byte) (int, error) { - logrus.Infof("%s%s", w.prefix, string(p)) - return len(p), nil -} diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/ports.go b/vendor/github.com/containers/podman/v3/pkg/specgen/generate/ports.go deleted file mode 100644 index 992b4a8e969..00000000000 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/ports.go +++ /dev/null @@ -1,431 +0,0 @@ -package generate - -import ( - "context" - "net" - "strconv" - "strings" - - "github.com/containers/common/libimage" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/utils" - - "github.com/containers/podman/v3/pkg/specgen" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) 
- -const ( - protoTCP = "tcp" - protoUDP = "udp" - protoSCTP = "sctp" -) - -// Parse port maps to OCICNI port mappings. -// Returns a set of OCICNI port mappings, and maps of utilized container and -// host ports. -func ParsePortMapping(portMappings []types.PortMapping) ([]types.OCICNIPortMapping, map[string]map[string]map[uint16]uint16, map[string]map[string]map[uint16]uint16, error) { - // First, we need to validate the ports passed in the specgen, and then - // convert them into CNI port mappings. - type tempMapping struct { - mapping types.OCICNIPortMapping - startOfRange bool - isInRange bool - } - tempMappings := []tempMapping{} - - // To validate, we need two maps: one for host ports, one for container - // ports. - // Each is a map of protocol to map of IP address to map of port to - // port (for hostPortValidate, it's host port to container port; - // for containerPortValidate, container port to host port. - // These will ensure no collisions. - hostPortValidate := make(map[string]map[string]map[uint16]uint16) - containerPortValidate := make(map[string]map[string]map[uint16]uint16) - - // Initialize the first level of maps (we can't really guess keys for - // the rest). - for _, proto := range []string{protoTCP, protoUDP, protoSCTP} { - hostPortValidate[proto] = make(map[string]map[uint16]uint16) - containerPortValidate[proto] = make(map[string]map[uint16]uint16) - } - - postAssignHostPort := false - - // Iterate through all port mappings, generating OCICNI PortMapping - // structs and validating there is no overlap. - for _, port := range portMappings { - // First, check proto - protocols, err := checkProtocol(port.Protocol, true) - if err != nil { - return nil, nil, nil, err - } - - // Validate host IP - hostIP := port.HostIP - if hostIP == "" { - hostIP = "0.0.0.0" - } - if ip := net.ParseIP(hostIP); ip == nil { - return nil, nil, nil, errors.Errorf("invalid IP address %s in port mapping", port.HostIP) - } - - // Validate port numbers and range. - len := port.Range - if len == 0 { - len = 1 - } - containerPort := port.ContainerPort - if containerPort == 0 { - return nil, nil, nil, errors.Errorf("container port number must be non-0") - } - hostPort := port.HostPort - if uint32(len-1)+uint32(containerPort) > 65535 { - return nil, nil, nil, errors.Errorf("container port range exceeds maximum allowable port number") - } - if uint32(len-1)+uint32(hostPort) > 65536 { - return nil, nil, nil, errors.Errorf("host port range exceeds maximum allowable port number") - } - - // Iterate through ports, populating maps to check for conflicts - // and generating CNI port mappings. - for _, p := range protocols { - hostIPMap := hostPortValidate[p] - ctrIPMap := containerPortValidate[p] - - hostPortMap, ok := hostIPMap[hostIP] - if !ok { - hostPortMap = make(map[uint16]uint16) - hostIPMap[hostIP] = hostPortMap - } - ctrPortMap, ok := ctrIPMap[hostIP] - if !ok { - ctrPortMap = make(map[uint16]uint16) - ctrIPMap[hostIP] = ctrPortMap - } - - // Iterate through all port numbers in the requested - // range. - var index uint16 - for index = 0; index < len; index++ { - cPort := containerPort + index - hPort := hostPort - // Only increment host port if it's not 0. - if hostPort != 0 { - hPort += index - } - - if cPort == 0 { - return nil, nil, nil, errors.Errorf("container port cannot be 0") - } - - // Host port is allowed to be 0. If it is, we - // select a random port on the host. 
- // This will happen *after* all other ports are - // placed, to ensure we don't accidentally - // select a port that a later mapping wanted. - if hPort == 0 { - // If we already have a host port - // assigned to their container port - - // just use that. - if ctrPortMap[cPort] != 0 { - hPort = ctrPortMap[cPort] - } else { - postAssignHostPort = true - } - } else { - testHPort := hostPortMap[hPort] - if testHPort != 0 && testHPort != cPort { - return nil, nil, nil, errors.Errorf("conflicting port mappings for host port %d (protocol %s)", hPort, p) - } - hostPortMap[hPort] = cPort - - // Mapping a container port to multiple - // host ports is allowed. - // We only store the latest of these in - // the container port map - we don't - // need to know all of them, just one. - testCPort := ctrPortMap[cPort] - ctrPortMap[cPort] = hPort - - // If we have an exact duplicate, just continue - if testCPort == hPort && testHPort == cPort { - continue - } - } - - // We appear to be clear. Make an OCICNI port - // struct. - // Don't use hostIP - we want to preserve the - // empty string hostIP by default for compat. - cniPort := types.OCICNIPortMapping{ - HostPort: int32(hPort), - ContainerPort: int32(cPort), - Protocol: p, - HostIP: port.HostIP, - } - tempMappings = append( - tempMappings, - tempMapping{ - mapping: cniPort, - startOfRange: port.Range > 1 && index == 0, - isInRange: port.Range > 1, - }, - ) - } - } - } - - // Handle any 0 host ports now by setting random container ports. - if postAssignHostPort { - remadeMappings := make([]types.OCICNIPortMapping, 0, len(tempMappings)) - - var ( - candidate int - err error - ) - - // Iterate over all - for _, tmp := range tempMappings { - p := tmp.mapping - - if p.HostPort != 0 { - remadeMappings = append(remadeMappings, p) - continue - } - - hostIPMap := hostPortValidate[p.Protocol] - ctrIPMap := containerPortValidate[p.Protocol] - - hostPortMap, ok := hostIPMap[p.HostIP] - if !ok { - hostPortMap = make(map[uint16]uint16) - hostIPMap[p.HostIP] = hostPortMap - } - ctrPortMap, ok := ctrIPMap[p.HostIP] - if !ok { - ctrPortMap = make(map[uint16]uint16) - ctrIPMap[p.HostIP] = ctrPortMap - } - - // See if container port has been used elsewhere - if ctrPortMap[uint16(p.ContainerPort)] != 0 { - // Duplicate definition. Let's not bother - // including it. - continue - } - - // Max retries to ensure we don't loop forever. - for i := 0; i < 15; i++ { - // Only get a random candidate for single entries or the start - // of a range. Otherwise we just increment the candidate. 
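// (Editorial note: in the loop below, a standalone mapping or the first
// port of a published range draws a fresh random candidate, re-rolled on
// every retry; later ports of a range just increment from it so the whole
// range stays contiguous on the host. The hostPortMap lookup rejects
// candidates that are already taken and sends the loop around again.)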
- if !tmp.isInRange || tmp.startOfRange { - candidate, err = utils.GetRandomPort() - if err != nil { - return nil, nil, nil, errors.Wrapf(err, "error getting candidate host port for container port %d", p.ContainerPort) - } - } else { - candidate++ - } - - if hostPortMap[uint16(candidate)] == 0 { - logrus.Debugf("Successfully assigned container port %d to host port %d (IP %s Protocol %s)", p.ContainerPort, candidate, p.HostIP, p.Protocol) - hostPortMap[uint16(candidate)] = uint16(p.ContainerPort) - ctrPortMap[uint16(p.ContainerPort)] = uint16(candidate) - p.HostPort = int32(candidate) - break - } - } - if p.HostPort == 0 { - return nil, nil, nil, errors.Errorf("could not find open host port to map container port %d to", p.ContainerPort) - } - remadeMappings = append(remadeMappings, p) - } - return remadeMappings, containerPortValidate, hostPortValidate, nil - } - - finalMappings := []types.OCICNIPortMapping{} - for _, m := range tempMappings { - finalMappings = append(finalMappings, m.mapping) - } - - return finalMappings, containerPortValidate, hostPortValidate, nil -} - -// Make final port mappings for the container -func createPortMappings(ctx context.Context, s *specgen.SpecGenerator, imageData *libimage.ImageData) ([]types.OCICNIPortMapping, map[uint16][]string, error) { - finalMappings, containerPortValidate, hostPortValidate, err := ParsePortMapping(s.PortMappings) - if err != nil { - return nil, nil, err - } - - // No exposed ports so return the port mappings we've made so far. - if len(s.Expose) == 0 && imageData == nil { - return finalMappings, nil, nil - } - - logrus.Debugf("Adding exposed ports") - - expose := make(map[uint16]string) - if imageData != nil { - expose, err = GenExposedPorts(imageData.Config.ExposedPorts) - if err != nil { - return nil, nil, err - } - } - - // We need to merge s.Expose into image exposed ports - for k, v := range s.Expose { - expose[k] = v - } - // There's been a request to expose some ports. Let's do that. - // Start by figuring out what needs to be exposed. - // This is a map of container port number to protocols to expose. - toExpose := make(map[uint16][]string) - for port, proto := range expose { - // Validate protocol first - protocols, err := checkProtocol(proto, false) - if err != nil { - return nil, nil, errors.Wrapf(err, "error validating protocols for exposed port %d", port) - } - - if port == 0 { - return nil, nil, errors.Errorf("cannot expose 0 as it is not a valid port number") - } - - // Check to see if the port is already present in existing - // mappings. - for _, p := range protocols { - ctrPortMap, ok := containerPortValidate[p]["0.0.0.0"] - if !ok { - ctrPortMap = make(map[uint16]uint16) - containerPortValidate[p]["0.0.0.0"] = ctrPortMap - } - - if portNum := ctrPortMap[port]; portNum == 0 { - // We want to expose this port for this protocol - exposeProto, ok := toExpose[port] - if !ok { - exposeProto = []string{} - } - exposeProto = append(exposeProto, p) - toExpose[port] = exposeProto - } - } - } - - // If not publishing exposed ports return mappings and exposed ports. - if !s.PublishExposedPorts { - return finalMappings, toExpose, nil - } - - // We now have a final list of ports that we want exposed. - // Let's find empty, unallocated host ports for them. - for port, protocols := range toExpose { - for _, p := range protocols { - // Find an open port on the host. - // I see a faint possibility that this will infinite - // loop trying to find a valid open port, so I've - // included a max-tries counter. 
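// (Editorial note: utils.GetRandomPort is not part of this diff; judging
// from the "can't select a specific protocol" remark below, it presumably
// asks the kernel for a free port by binding port 0 over TCP. That is why
// the code re-checks its own hostPortValidate bookkeeping rather than
// trusting the bind alone, and why UDP collisions remain possible.)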
- hostPort := 0 - tries := 15 - for hostPort == 0 && tries > 0 { - // We can't select a specific protocol, which is - // unfortunate for the UDP case. - candidate, err := utils.GetRandomPort() - if err != nil { - return nil, nil, err - } - - // Check if the host port is already bound - hostPortMap, ok := hostPortValidate[p]["0.0.0.0"] - if !ok { - hostPortMap = make(map[uint16]uint16) - hostPortValidate[p]["0.0.0.0"] = hostPortMap - } - - if checkPort := hostPortMap[uint16(candidate)]; checkPort != 0 { - // Host port is already allocated, try again - tries-- - continue - } - - hostPortMap[uint16(candidate)] = port - hostPort = candidate - logrus.Debugf("Mapping exposed port %d/%s to host port %d", port, p, hostPort) - - // Make a CNI port mapping - cniPort := types.OCICNIPortMapping{ - HostPort: int32(candidate), - ContainerPort: int32(port), - Protocol: p, - HostIP: "", - } - finalMappings = append(finalMappings, cniPort) - } - if tries == 0 && hostPort == 0 { - // We failed to find an open port. - return nil, nil, errors.Errorf("failed to find an open port to expose container port %d on the host", port) - } - } - } - - return finalMappings, nil, nil -} - -// Check a string to ensure it is a comma-separated set of valid protocols -func checkProtocol(protocol string, allowSCTP bool) ([]string, error) { - protocols := make(map[string]struct{}) - splitProto := strings.Split(protocol, ",") - // Don't error on duplicates - just deduplicate - for _, p := range splitProto { - p = strings.ToLower(p) - switch p { - case protoTCP, "": - protocols[protoTCP] = struct{}{} - case protoUDP: - protocols[protoUDP] = struct{}{} - case protoSCTP: - if !allowSCTP { - return nil, errors.Errorf("protocol SCTP is not allowed for exposed ports") - } - protocols[protoSCTP] = struct{}{} - default: - return nil, errors.Errorf("unrecognized protocol %q in port mapping", p) - } - } - - finalProto := []string{} - for p := range protocols { - finalProto = append(finalProto, p) - } - - // This shouldn't be possible, but check anyways - if len(finalProto) == 0 { - return nil, errors.Errorf("no valid protocols specified for port mapping") - } - - return finalProto, nil -} - -func GenExposedPorts(exposedPorts map[string]struct{}) (map[uint16]string, error) { - expose := make(map[uint16]string) - for imgExpose := range exposedPorts { - // Expose format is portNumber[/protocol] - splitExpose := strings.SplitN(imgExpose, "/", 2) - num, err := strconv.Atoi(splitExpose[0]) - if err != nil { - return nil, errors.Wrapf(err, "unable to convert image EXPOSE statement %q to port number", imgExpose) - } - if num > 65535 || num < 1 { - return nil, errors.Errorf("%d from image EXPOSE statement %q is not a valid port number", num, imgExpose) - } - // No need to validate protocol, we'll do it below. 
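// (Editorial note: an EXPOSE entry without a "/protocol" suffix defaults
// to tcp below, matching Dockerfile semantics; checkProtocol() is later
// called with allowSCTP=false for exposed ports, so only tcp and udp
// survive validation there.)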
- if len(splitExpose) == 1 { - expose[uint16(num)] = "tcp" - } else { - expose[uint16(num)] = splitExpose[1] - } - } - return expose, nil -} diff --git a/vendor/github.com/containers/podman/v3/LICENSE b/vendor/github.com/containers/podman/v4/LICENSE similarity index 100% rename from vendor/github.com/containers/podman/v3/LICENSE rename to vendor/github.com/containers/podman/v4/LICENSE diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/filters.go b/vendor/github.com/containers/podman/v4/cmd/podman/parse/filters.go new file mode 100644 index 00000000000..8a10f2a97d6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/cmd/podman/parse/filters.go @@ -0,0 +1,20 @@ +package parse + +import ( + "net/url" + "strings" + + "github.com/pkg/errors" +) + +func FilterArgumentsIntoFilters(filters []string) (url.Values, error) { + parsedFilters := make(url.Values) + for _, f := range filters { + t := strings.SplitN(f, "=", 2) + if len(t) < 2 { + return parsedFilters, errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f) + } + parsedFilters.Add(t[0], t[1]) + } + return parsedFilters, nil +} diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/json.go b/vendor/github.com/containers/podman/v4/cmd/podman/parse/json.go new file mode 100644 index 00000000000..85572a05701 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/cmd/podman/parse/json.go @@ -0,0 +1,10 @@ +package parse + +import "regexp" + +var jsonFormatRegex = regexp.MustCompile(`^\s*(json|{{\s*json\s*(\.)?\s*}})\s*$`) + +// MatchesJSONFormat test CLI --format string to be a JSON request. +func MatchesJSONFormat(s string) bool { + return jsonFormatRegex.Match([]byte(s)) +} diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go b/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go new file mode 100644 index 00000000000..870690db32b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go @@ -0,0 +1,191 @@ +// nolint +// most of these validate and parse functions have been taken from projectatomic/docker +// and modified for cri-o +package parse + +import ( + "bufio" + "fmt" + "net" + "net/url" + "os" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +const ( + Protocol_TCP Protocol = 0 + Protocol_UDP Protocol = 1 +) + +type Protocol int32 + +// PortMapping specifies the port mapping configurations of a sandbox. +type PortMapping struct { + // Protocol of the port mapping. + Protocol Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=runtime.Protocol" json:"protocol,omitempty"` + // Port number within the container. Default: 0 (not specified). + ContainerPort int32 `protobuf:"varint,2,opt,name=container_port,json=containerPort,proto3" json:"container_port,omitempty"` + // Port number on the host. Default: 0 (not specified). + HostPort int32 `protobuf:"varint,3,opt,name=host_port,json=hostPort,proto3" json:"host_port,omitempty"` + // Host IP. 
+ HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` +} + +// Note: for flags that are in the form <number><unit>, use the RAMInBytes function +// from the units package in docker/go-units/size.go + +var ( + whiteSpaces = " \t" + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ValidateExtraHost validates that the specified string is a valid extra host and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). +// for add-host flag +func ValidateExtraHost(val string) (string, error) { // nolint + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := validateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// validateIPAddress validates an IP address. +// for dns, ip, and ip6 flags also +func validateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +func ValidateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// GetAllLabels retrieves all labels given a potential label file and a number +// of labels provided from the command line. +func GetAllLabels(labelFile, inputLabels []string) (map[string]string, error) { + labels := make(map[string]string) + for _, file := range labelFile { + // Use of parseEnvFile still seems safe, as it's missing the + // extra parsing logic of parseEnv. + // There's an argument that we SHOULD be doing that parsing for + // all environment variables, even those sourced from files, but + // that would require a substantial rework. + if err := parseEnvFile(labels, file); err != nil { + // FIXME: parseEnvFile is using parseEnv, so we need to add extra + // logic for labels.
+ return nil, err + } + } + for _, label := range inputLabels { + split := strings.SplitN(label, "=", 2) + if split[0] == "" { + return nil, errors.Errorf("invalid label format: %q", label) + } + value := "" + if len(split) > 1 { + value = split[1] + } + labels[split[0]] = value + } + return labels, nil +} + +func parseEnv(env map[string]string, line string) error { + data := strings.SplitN(line, "=", 2) + + // catch invalid variables such as "=" or "=A" + if data[0] == "" { + return errors.Errorf("invalid environment variable: %q", line) + } + + // trim the front of a variable, but nothing else + name := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(name, whiteSpaces) { + return errors.Errorf("name %q has white spaces, poorly formatted name", name) + } + + if len(data) > 1 { + env[name] = data[1] + } else { + if strings.HasSuffix(name, "*") { + name = strings.TrimSuffix(name, "*") + for _, e := range os.Environ() { + part := strings.SplitN(e, "=", 2) + if len(part) < 2 { + continue + } + if strings.HasPrefix(part[0], name) { + env[part[0]] = part[1] + } + } + } else { + // if only a pass-through variable is given, clean it up. + if val, ok := os.LookupEnv(name); ok { + env[name] = val + } + } + } + return nil +} + +// parseEnvFile reads a file with environment variables enumerated by lines +func parseEnvFile(env map[string]string, filename string) error { + fh, err := os.Open(filename) + if err != nil { + return err + } + defer fh.Close() + + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + // trim the line from all leading whitespace first + line := strings.TrimLeft(scanner.Text(), whiteSpaces) + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + if err := parseEnv(env, line); err != nil { + return err + } + } + } + return scanner.Err() +} + +// ValidateFileName returns an error if filename contains ":" +// as it is currently not supported +func ValidateFileName(filename string) error { + if strings.Contains(filename, ":") { + return errors.Errorf("invalid filename (should not contain ':') %q", filename) + } + return nil +} + +// ValidURL checks a string urlStr is a url or not +func ValidURL(urlStr string) error { + url, err := url.ParseRequestURI(urlStr) + if err != nil { + return errors.Wrapf(err, "invalid url %q", urlStr) + } + if url.Scheme == "" { + return errors.Errorf("invalid url %q: missing scheme", urlStr) + } + return nil +} diff --git a/vendor/github.com/containers/podman/v3/libpod/boltdb_state.go b/vendor/github.com/containers/podman/v4/libpod/boltdb_state.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/boltdb_state.go rename to vendor/github.com/containers/podman/v4/libpod/boltdb_state.go index 1242a8d6bac..9745121c775 100644 --- a/vendor/github.com/containers/podman/v3/libpod/boltdb_state.go +++ b/vendor/github.com/containers/podman/v4/libpod/boltdb_state.go @@ -2,11 +2,14 @@ package libpod import ( "bytes" + "fmt" + "net" "os" "strings" "sync" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/libpod/define" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -212,7 +215,7 @@ func (s *BoltState) Refresh() error { return errors.Wrapf(err, "error unmarshalling state for pod %s", string(id)) } - // Clear the CGroup path + // Clear the Cgroup path state.CgroupPath = "" newStateBytes, err := json.Marshal(state) @@ -363,7 +366,7 @@ func (s 
*BoltState) GetDBConfig() (*DBConfig, error) { err = db.View(func(tx *bolt.Tx) error { configBucket, err := getRuntimeConfigBucket(tx) if err != nil { - return nil + return err } // Some of these may be nil @@ -971,7 +974,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) { } // GetNetworks returns the CNI networks this container is a part of. -func (s *BoltState) GetNetworks(ctr *Container) ([]string, error) { +func (s *BoltState) GetNetworks(ctr *Container) (map[string]types.PerNetworkOptions, error) { if !s.valid { return nil, define.ErrDBClosed } @@ -984,61 +987,9 @@ func (s *BoltState) GetNetworks(ctr *Container) ([]string, error) { return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace) } - ctrID := []byte(ctr.ID()) - - db, err := s.getDBCon() - if err != nil { - return nil, err - } - defer s.deferredCloseDBCon(db) - - networks := []string{} - - err = db.View(func(tx *bolt.Tx) error { - ctrBucket, err := getCtrBucket(tx) - if err != nil { - return err - } - - dbCtr := ctrBucket.Bucket(ctrID) - if dbCtr == nil { - ctr.valid = false - return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID()) - } - - ctrNetworkBkt := dbCtr.Bucket(networksBkt) - if ctrNetworkBkt == nil { - return errors.Wrapf(define.ErrNoSuchNetwork, "container %s is not joined to any CNI networks", ctr.ID()) - } - - return ctrNetworkBkt.ForEach(func(network, v []byte) error { - networks = append(networks, string(network)) - return nil - }) - }) - if err != nil { - return nil, err - } - return networks, nil -} - -// GetNetworkAliases retrieves the network aliases for the given container in -// the given CNI network. -func (s *BoltState) GetNetworkAliases(ctr *Container, network string) ([]string, error) { - if !s.valid { - return nil, define.ErrDBClosed - } - - if !ctr.valid { - return nil, define.ErrCtrRemoved - } - - if network == "" { - return nil, errors.Wrapf(define.ErrInvalidArg, "network names must not be empty") - } - - if s.namespace != "" && s.namespace != ctr.config.Namespace { - return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace) + // if the network mode is not bridge return no networks + if !ctr.config.NetMode.IsBridge() { + return nil, nil } ctrID := []byte(ctr.ID()) @@ -1049,7 +1000,9 @@ func (s *BoltState) GetNetworkAliases(ctr *Container, network string) ([]string, } defer s.deferredCloseDBCon(db) - aliases := []string{} + networks := make(map[string]types.PerNetworkOptions) + + var convertDB bool err = db.View(func(tx *bolt.Tx) error { ctrBucket, err := getCtrBucket(tx) @@ -1065,115 +1018,137 @@ func (s *BoltState) GetNetworkAliases(ctr *Container, network string) ([]string, ctrNetworkBkt := dbCtr.Bucket(networksBkt) if ctrNetworkBkt == nil { - // No networks joined, so no aliases - return nil - } - - inNetwork := ctrNetworkBkt.Get([]byte(network)) - if inNetwork == nil { - return errors.Wrapf(define.ErrNoAliases, "container %s is not part of network %s, no aliases found", ctr.ID(), network) - } - - ctrAliasesBkt := dbCtr.Bucket(aliasesBkt) - if ctrAliasesBkt == nil { - // No aliases - return nil - } - - netAliasesBkt := ctrAliasesBkt.Bucket([]byte(network)) - if netAliasesBkt == nil { - // No aliases for this specific network. 
+ // convert if needed + convertDB = true return nil } - return netAliasesBkt.ForEach(func(alias, v []byte) error { - aliases = append(aliases, string(alias)) + return ctrNetworkBkt.ForEach(func(network, v []byte) error { + opts := types.PerNetworkOptions{} + if err := json.Unmarshal(v, &opts); err != nil { + // special case for backwards compat + // earlier version used the container id as value so we set a + // special error to indicate the we have to migrate the db + if !bytes.Equal(v, ctrID) { + return err + } + convertDB = true + } + networks[string(network)] = opts return nil }) }) if err != nil { return nil, err } + if convertDB { + err = db.Update(func(tx *bolt.Tx) error { + ctrBucket, err := getCtrBucket(tx) + if err != nil { + return err + } - return aliases, nil -} - -// GetAllNetworkAliases retrieves the network aliases for the given container in -// all CNI networks. -func (s *BoltState) GetAllNetworkAliases(ctr *Container) (map[string][]string, error) { - if !s.valid { - return nil, define.ErrDBClosed - } - - if !ctr.valid { - return nil, define.ErrCtrRemoved - } - - if s.namespace != "" && s.namespace != ctr.config.Namespace { - return nil, errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace) - } - - ctrID := []byte(ctr.ID()) + dbCtr := ctrBucket.Bucket(ctrID) + if dbCtr == nil { + ctr.valid = false + return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID()) + } - db, err := s.getDBCon() - if err != nil { - return nil, err - } - defer s.deferredCloseDBCon(db) + var networkList []string - aliases := make(map[string][]string) + ctrNetworkBkt := dbCtr.Bucket(networksBkt) + if ctrNetworkBkt == nil { + ctrNetworkBkt, err = dbCtr.CreateBucket(networksBkt) + if err != nil { + return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID()) + } + // the container has no networks in the db lookup config and write to the db + networkList = ctr.config.NetworksDeprecated + // if there are no networks we have to add the default + if len(networkList) == 0 { + networkList = []string{ctr.runtime.config.Network.DefaultNetwork} + } + } else { + err = ctrNetworkBkt.ForEach(func(network, v []byte) error { + networkList = append(networkList, string(network)) + return nil + }) + if err != nil { + return err + } + } - err = db.View(func(tx *bolt.Tx) error { - ctrBucket, err := getCtrBucket(tx) - if err != nil { - return err - } + // the container has no networks in the db lookup config and write to the db + for i, network := range networkList { + var intName string + if ctr.state.NetInterfaceDescriptions != nil { + eth, exists := ctr.state.NetInterfaceDescriptions.getInterfaceByName(network) + if !exists { + return errors.Errorf("no network interface name for container %s on network %s", ctr.config.ID, network) + } + intName = eth + } else { + intName = fmt.Sprintf("eth%d", i) + } + getAliases := func(network string) []string { + var aliases []string + ctrAliasesBkt := dbCtr.Bucket(aliasesBkt) + if ctrAliasesBkt == nil { + return nil + } + netAliasesBkt := ctrAliasesBkt.Bucket([]byte(network)) + if netAliasesBkt == nil { + // No aliases for this specific network. 
+ return nil + } - dbCtr := ctrBucket.Bucket(ctrID) - if dbCtr == nil { - ctr.valid = false - return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID()) - } + // lets ignore the error here there is nothing we can do + _ = netAliasesBkt.ForEach(func(alias, v []byte) error { + aliases = append(aliases, string(alias)) + return nil + }) + // also add the short container id as alias + return aliases + } - ctrAliasesBkt := dbCtr.Bucket(aliasesBkt) - if ctrAliasesBkt == nil { - // No aliases present - return nil - } + netOpts := &types.PerNetworkOptions{ + InterfaceName: intName, + // we have to add the short id as alias for docker compat + Aliases: append(getAliases(network), ctr.config.ID[:12]), + } + // only set the static ip/mac on the first network + if i == 0 { + if ctr.config.StaticIP != nil { + netOpts.StaticIPs = []net.IP{ctr.config.StaticIP} + } + netOpts.StaticMAC = ctr.config.StaticMAC + } - ctrNetworkBkt := dbCtr.Bucket(networksBkt) - if ctrNetworkBkt == nil { - // No networks joined, so no aliases - return nil - } + optsBytes, err := json.Marshal(netOpts) + if err != nil { + return err + } + // insert into network map because we need to return this + networks[network] = *netOpts - return ctrNetworkBkt.ForEach(func(network, v []byte) error { - netAliasesBkt := ctrAliasesBkt.Bucket(network) - if netAliasesBkt == nil { - return nil + err = ctrNetworkBkt.Put([]byte(network), optsBytes) + if err != nil { + return err + } } - - netAliases := []string{} - - _ = netAliasesBkt.ForEach(func(alias, v []byte) error { - netAliases = append(netAliases, string(alias)) - return nil - }) - - aliases[string(network)] = netAliases return nil }) - }) - if err != nil { - return nil, err + if err != nil { + return nil, err + } } - return aliases, nil + return networks, nil } // NetworkConnect adds the given container to the given network. If aliases are // specified, those will be added to the given network. 
-func (s *BoltState) NetworkConnect(ctr *Container, network string, aliases []string) error { +func (s *BoltState) NetworkConnect(ctr *Container, network string, opts types.PerNetworkOptions) error { if !s.valid { return define.ErrDBClosed } @@ -1190,6 +1165,11 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, aliases []str return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace) } + optBytes, err := json.Marshal(opts) + if err != nil { + return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID()) + } + ctrID := []byte(ctr.ID()) db, err := s.getDBCon() @@ -1210,47 +1190,20 @@ func (s *BoltState) NetworkConnect(ctr *Container, network string, aliases []str return errors.Wrapf(define.ErrNoSuchCtr, "container %s does not exist in database", ctr.ID()) } - ctrAliasesBkt, err := dbCtr.CreateBucketIfNotExists(aliasesBkt) - if err != nil { - return errors.Wrapf(err, "error creating aliases bucket for container %s", ctr.ID()) - } - ctrNetworksBkt := dbCtr.Bucket(networksBkt) if ctrNetworksBkt == nil { - ctrNetworksBkt, err = dbCtr.CreateBucket(networksBkt) - if err != nil { - return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID()) - } - ctrNetworks := ctr.config.Networks - if len(ctrNetworks) == 0 { - ctrNetworks = []string{ctr.runtime.config.Network.DefaultNetwork} - } - // Copy in all the container's CNI networks - for _, net := range ctrNetworks { - if err := ctrNetworksBkt.Put([]byte(net), ctrID); err != nil { - return errors.Wrapf(err, "error adding container %s network %s to DB", ctr.ID(), net) - } - } + return errors.Wrapf(define.ErrNoSuchNetwork, "container %s does not have a network bucket", ctr.ID()) } netConnected := ctrNetworksBkt.Get([]byte(network)) if netConnected != nil { - return errors.Wrapf(define.ErrNetworkExists, "container %s is already connected to CNI network %q", ctr.ID(), network) + return errors.Wrapf(define.ErrNetworkExists, "container %s is already connected to network %q", ctr.ID(), network) } // Add the network - if err := ctrNetworksBkt.Put([]byte(network), ctrID); err != nil { + if err := ctrNetworksBkt.Put([]byte(network), optBytes); err != nil { return errors.Wrapf(err, "error adding container %s to network %s in DB", ctr.ID(), network) } - ctrNetAliasesBkt, err := ctrAliasesBkt.CreateBucketIfNotExists([]byte(network)) - if err != nil { - return errors.Wrapf(err, "error adding container %s network aliases bucket for network %s", ctr.ID(), network) - } - for _, alias := range aliases { - if err := ctrNetAliasesBkt.Put([]byte(alias), ctrID); err != nil { - return errors.Wrapf(err, "error adding container %s network alias %s for network %s", ctr.ID(), alias, network) - } - } return nil }) } diff --git a/vendor/github.com/containers/podman/v3/libpod/boltdb_state_internal.go b/vendor/github.com/containers/podman/v4/libpod/boltdb_state_internal.go similarity index 95% rename from vendor/github.com/containers/podman/v3/libpod/boltdb_state_internal.go rename to vendor/github.com/containers/podman/v4/libpod/boltdb_state_internal.go index 3e3c17a9e07..d6f035af936 100644 --- a/vendor/github.com/containers/podman/v3/libpod/boltdb_state_internal.go +++ b/vendor/github.com/containers/podman/v4/libpod/boltdb_state_internal.go @@ -7,8 +7,8 @@ import ( "runtime" "strings" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + 
"github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -384,6 +384,15 @@ func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, return errors.Wrapf(err, "error unmarshalling container %s config", string(id)) } + // convert ports to the new format if needed + if len(config.ContainerNetworkConfig.OldPortMappings) > 0 && len(config.ContainerNetworkConfig.PortMappings) == 0 { + config.ContainerNetworkConfig.PortMappings = ocicniPortsToNetTypesPorts(config.ContainerNetworkConfig.OldPortMappings) + // keep the OldPortMappings in case an user has to downgrade podman + + // indicate the the config was modified and should be written back to the db when possible + config.rewrite = true + } + return nil } @@ -533,8 +542,12 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { ctr.ID(), s.namespace, ctr.config.Namespace) } + // Set the original networks to nil. We can save some space by not storing it in the config + // since we store it in a different mutable bucket anyway. + configNetworks := ctr.config.Networks + ctr.config.Networks = nil + // JSON container structs to insert into DB - // TODO use a higher-performance struct encoding than JSON configJSON, err := json.Marshal(ctr.config) if err != nil { return errors.Wrapf(err, "error marshalling container %s config to JSON", ctr.ID()) @@ -554,6 +567,25 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { ctrNamespace = []byte(ctr.config.Namespace) } + // make sure to marshal the network options before we get the db lock + networks := make(map[string][]byte, len(configNetworks)) + for net, opts := range configNetworks { + // Check that we don't have any empty network names + if net == "" { + return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string") + } + if opts.InterfaceName == "" { + return errors.Wrapf(define.ErrInvalidArg, "network interface name cannot be an empty string") + } + // always add the short id as alias for docker compat + opts.Aliases = append(opts.Aliases, ctr.config.ID[:12]) + optBytes, err := json.Marshal(opts) + if err != nil { + return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID()) + } + networks[net] = optBytes + } + db, err := s.getDBCon() if err != nil { return err @@ -637,23 +669,6 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name()) } - allNets := make(map[string]bool) - - // Check that we don't have any empty network names - for _, net := range ctr.config.Networks { - if net == "" { - return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string") - } - allNets[net] = true - } - - // Each network we have aliases for, must exist in networks - for net := range ctr.config.NetworkAliases { - if !allNets[net] { - return errors.Wrapf(define.ErrNoSuchNetwork, "container %s has network aliases for network %q but is not part of that network", ctr.ID(), net) - } - } - // No overlapping containers // Add the new container to the DB if err := idsBucket.Put(ctrID, ctrName); err != nil { @@ -697,34 +712,17 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error { return errors.Wrapf(err, "error adding container %s netns path to DB", ctr.ID()) } } - if ctr.config.Networks != nil { + if len(networks) > 0 { ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt) if err != 
nil { return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID()) } - for _, network := range ctr.config.Networks { - if err := ctrNetworksBkt.Put([]byte(network), ctrID); err != nil { + for network, opts := range networks { + if err := ctrNetworksBkt.Put([]byte(network), opts); err != nil { return errors.Wrapf(err, "error adding network %q to networks bucket for container %s", network, ctr.ID()) } } } - if ctr.config.NetworkAliases != nil { - ctrAliasesBkt, err := newCtrBkt.CreateBucket(aliasesBkt) - if err != nil { - return errors.Wrapf(err, "error creating network aliases bucket for container %s", ctr.ID()) - } - for net, aliases := range ctr.config.NetworkAliases { - netAliasesBkt, err := ctrAliasesBkt.CreateBucket([]byte(net)) - if err != nil { - return errors.Wrapf(err, "error creating network aliases bucket for network %q in container %s", net, ctr.ID()) - } - for _, alias := range aliases { - if err := netAliasesBkt.Put([]byte(alias), ctrID); err != nil { - return errors.Wrapf(err, "error creating network alias %q in network %q for container %s", alias, net, ctr.ID()) - } - } - } - } if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil { return errors.Wrapf(err, "error creating dependencies bucket for container %s", ctr.ID()) diff --git a/vendor/github.com/containers/podman/v3/libpod/boltdb_state_linux.go b/vendor/github.com/containers/podman/v4/libpod/boltdb_state_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v3/libpod/boltdb_state_linux.go rename to vendor/github.com/containers/podman/v4/libpod/boltdb_state_linux.go index 4fb3236a071..8bb10fb63c6 100644 --- a/vendor/github.com/containers/podman/v3/libpod/boltdb_state_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/boltdb_state_linux.go @@ -1,9 +1,10 @@ +//go:build linux // +build linux package libpod import ( - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/container.go b/vendor/github.com/containers/podman/v4/libpod/container.go similarity index 90% rename from vendor/github.com/containers/podman/v3/libpod/container.go rename to vendor/github.com/containers/podman/v4/libpod/container.go index 4d15c04c51c..457b290b72c 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container.go +++ b/vendor/github.com/containers/podman/v4/libpod/container.go @@ -6,22 +6,24 @@ import ( "io/ioutil" "net" "os" + "strings" "time" types040 "github.com/containernetworking/cni/pkg/types/040" + "github.com/containers/common/libnetwork/cni" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/lock" - "github.com/containers/podman/v3/libpod/network/cni" - "github.com/containers/podman/v3/libpod/network/types" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/lock" "github.com/containers/storage" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// CgroupfsDefaultCgroupParent is the cgroup parent for CGroupFS in libpod +// CgroupfsDefaultCgroupParent is the cgroup parent for CgroupFS in libpod const CgroupfsDefaultCgroupParent = "/libpod_parent" // SystemdDefaultCgroupParent is the cgroup 
parent for the systemd cgroup @@ -54,7 +56,7 @@ const ( UserNS LinuxNS = iota // UTSNS is the UTS namespace UTSNS LinuxNS = iota - // CgroupNS is the CGroup namespace + // CgroupNS is the Cgroup namespace CgroupNS LinuxNS = iota ) @@ -211,6 +213,15 @@ type ContainerState struct { // containerPlatformState holds platform-specific container state. containerPlatformState + + // Following checkpoint/restore related information is displayed + // if the container has been checkpointed or restored. + CheckpointedTime time.Time `json:"checkpointedTime,omitempty"` + RestoredTime time.Time `json:"restoredTime,omitempty"` + CheckpointLog string `json:"checkpointLog,omitempty"` + CheckpointPath string `json:"checkpointPath,omitempty"` + RestoreLog string `json:"restoreLog,omitempty"` + Restored bool `json:"restored,omitempty"` } // ContainerNamedVolume is a named volume that will be mounted into the @@ -259,6 +270,8 @@ type ContainerSecret struct { GID uint32 // Mode is the mode of the secret file Mode uint32 + // Secret target inside container + Target string } // ContainerNetworkDescriptions describes the relationship between the CNI @@ -275,9 +288,25 @@ func (c *Container) Config() *ContainerConfig { return nil } + if c != nil { + networks, err := c.networks() + if err != nil { + return nil + } + + returnConfig.Networks = networks + } + return returnConfig } +// ConfigNoCopy returns the configuration used by the container. +// Note that the returned value is not a copy and must hence +// only be used in a reading fashion. +func (c *Container) ConfigNoCopy() *ContainerConfig { + return c.config +} + // DeviceHostSrc returns the user supplied device to be passed down in the pod func (c *Container) DeviceHostSrc() []spec.LinuxDevice { return c.config.DeviceHostSrc @@ -404,7 +433,10 @@ func (c *Container) MountLabel() string { // Systemd returns whether the container will be running in systemd mode func (c *Container) Systemd() bool { - return c.config.Systemd + if c.config.Systemd != nil { + return *c.config.Systemd + } + return false } // User returns the user who the container is run as @@ -465,7 +497,7 @@ func (c *Container) NewNetNS() bool { // PortMappings returns the ports that will be mapped into a container if // a new network namespace is created // If NewNetNS() is false, this value is unused -func (c *Container) PortMappings() ([]types.OCICNIPortMapping, error) { +func (c *Container) PortMappings() ([]types.PortMapping, error) { // First check if the container belongs to a network namespace (like a pod) if len(c.config.NetNsCtr) > 0 { netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr) @@ -562,7 +594,7 @@ func (c *Container) CreatedTime() time.Time { return c.config.CreatedTime } -// CgroupParent gets the container's CGroup parent +// CgroupParent gets the container's Cgroup parent func (c *Container) CgroupParent() string { return c.config.CgroupParent } @@ -605,6 +637,15 @@ func (c *Container) RuntimeName() string { // Hostname gets the container's hostname func (c *Container) Hostname() string { + if c.config.UTSNsCtr != "" { + utsNsCtr, err := c.runtime.GetContainer(c.config.UTSNsCtr) + if err != nil { + // should we return an error here? 
+ logrus.Errorf("unable to lookup uts namespace for container %s: %v", c.ID(), err) + return "" + } + return utsNsCtr.Hostname() + } if c.config.Spec.Hostname != "" { return c.config.Spec.Hostname } @@ -894,10 +935,10 @@ func (c *Container) CgroupManager() string { return cgroupManager } -// CGroupPath returns a cgroups "path" for the given container. +// CgroupPath returns a cgroups "path" for the given container. // Note that the container must be running. Otherwise, an error // is returned. -func (c *Container) CGroupPath() (string, error) { +func (c *Container) CgroupPath() (string, error) { if !c.batched { c.lock.Lock() defer c.lock.Unlock() @@ -935,6 +976,11 @@ func (c *Container) cGroupPath() (string, error) { procPath := fmt.Sprintf("/proc/%d/cgroup", c.state.PID) lines, err := ioutil.ReadFile(procPath) if err != nil { + // If the file doesn't exist, it means the container could have been terminated + // so report it. + if os.IsNotExist(err) { + return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID()) + } return "", err } @@ -961,6 +1007,29 @@ func (c *Container) cGroupPath() (string, error) { return "", errors.Errorf("could not find any cgroup in %q", procPath) } + cgroupManager := c.CgroupManager() + switch { + case c.config.CgroupsMode == cgroupSplit: + name := fmt.Sprintf("/libpod-payload-%s/", c.ID()) + if index := strings.LastIndex(cgroupPath, name); index >= 0 { + return cgroupPath[:index+len(name)-1], nil + } + case cgroupManager == config.CgroupfsCgroupsManager: + name := fmt.Sprintf("/libpod-%s/", c.ID()) + if index := strings.LastIndex(cgroupPath, name); index >= 0 { + return cgroupPath[:index+len(name)-1], nil + } + case cgroupManager == config.SystemdCgroupsManager: + // When running under systemd, try to detect the scope that was requested + // to be created. It improves the heuristic since we report the first + // cgroup that was created instead of the cgroup where PID 1 might have + // moved to. + name := fmt.Sprintf("/libpod-%s.scope/", c.ID()) + if index := strings.LastIndex(cgroupPath, name); index >= 0 { + return cgroupPath[:index+len(name)-1], nil + } + } + return cgroupPath, nil } @@ -1120,7 +1189,7 @@ func (c *Container) Umask() string { return c.config.Umask } -//Secrets return the secrets in the container +// Secrets return the secrets in the container func (c *Container) Secrets() []*ContainerSecret { return c.config.Secrets } @@ -1134,17 +1203,28 @@ func (c *Container) Secrets() []*ContainerSecret { // is joining the default CNI network - the network name will be included in the // returned array of network names, but the container did not explicitly join // this network. -func (c *Container) Networks() ([]string, bool, error) { +func (c *Container) Networks() ([]string, error) { if !c.batched { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return nil, false, err + return nil, err } } - return c.networks() + networks, err := c.networks() + if err != nil { + return nil, err + } + + names := make([]string, 0, len(networks)) + + for name := range networks { + names = append(names, name) + } + + return names, nil } // NetworkMode gets the configured network mode for the container. 
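// Editorial sketch: podman v4 stores per-network options rather than a
// bare name list, so Networks() now derives its []string from the map
// returned by networks(), and the old (names, isDefault, err) triple is
// gone. A hypothetical caller, assuming imports of "fmt" and
// "github.com/containers/podman/v4/libpod" (not part of this diff):
func printNetworks(ctr *libpod.Container) error {
	names, err := ctr.Networks() // was ([]string, bool, error) in v3
	if err != nil {
		return err
	}
	for _, name := range names {
		fmt.Println("joined network:", name)
	}
	return nil
}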
@@ -1188,36 +1268,11 @@ func (c *Container) NetworkMode() string { } // Unlocked accessor for networks -func (c *Container) networks() ([]string, bool, error) { - networks, err := c.runtime.state.GetNetworks(c) - if err != nil && errors.Cause(err) == define.ErrNoSuchNetwork { - if len(c.config.Networks) == 0 && c.config.NetMode.IsBridge() { - return []string{c.runtime.config.Network.DefaultNetwork}, true, nil - } - return c.config.Networks, false, nil - } - - return networks, false, err -} - -// networksByNameIndex provides us with a map of container networks where key -// is network name and value is the index position -func (c *Container) networksByNameIndex() (map[string]int, error) { - networks, _, err := c.networks() - if err != nil { - return nil, err +func (c *Container) networks() (map[string]types.PerNetworkOptions, error) { + if c != nil && c.runtime != nil && c.runtime.state != nil { // can fail if c.networks is called from the tests + return c.runtime.state.GetNetworks(c) } - networkNamesByIndex := make(map[string]int, len(networks)) - for index, name := range networks { - networkNamesByIndex[name] = index - } - return networkNamesByIndex, nil -} - -// add puts the new given CNI network name into the tracking map -// and assigns it a new integer based on the map length -func (d ContainerNetworkDescriptions) add(networkName string) { - d[networkName] = len(d) + return nil, nil } // getInterfaceByName returns a formatted interface name for a given @@ -1238,9 +1293,7 @@ func (c *Container) getNetworkStatus() map[string]types.StatusBlock { return c.state.NetworkStatus } if c.state.NetworkStatusOld != nil { - // Note: NetworkStatusOld does not contain the network names so we get them extra - // Generally the order should be the same - networks, _, err := c.networks() + networks, err := c.networks() if err != nil { return nil } @@ -1248,12 +1301,16 @@ func (c *Container) getNetworkStatus() map[string]types.StatusBlock { return nil } result := make(map[string]types.StatusBlock, len(c.state.NetworkStatusOld)) - for i := range c.state.NetworkStatusOld { + i := 0 + // Note: NetworkStatusOld does not contain the network names so we get them extra + // We cannot guarantee the same order but after a state refresh it should work + for netName := range networks { status, err := cni.CNIResultToStatus(c.state.NetworkStatusOld[i]) if err != nil { return nil } - result[networks[i]] = status + result[netName] = status + i++ } c.state.NetworkStatus = result _ = c.save() diff --git a/vendor/github.com/containers/podman/v3/libpod/container_api.go b/vendor/github.com/containers/podman/v4/libpod/container_api.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/container_api.go rename to vendor/github.com/containers/podman/v4/libpod/container_api.go index 50be0eea412..a6fcf709d84 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_api.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_api.go @@ -9,9 +9,9 @@ import ( "sync" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/pkg/signal" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/signal" "github.com/containers/storage/pkg/archive" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -184,7 +184,7 @@ func (c *Container) StopWithTimeout(timeout uint) error { return define.ErrCtrStopped } - if 
!c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) { + if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopping) { return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. %s is in state %s", c.ID(), c.state.State.String()) } @@ -229,8 +229,7 @@ func (c *Container) Kill(signal uint) error { // This function returns when the attach finishes. It does not hold the lock for // the duration of its runtime, only using it at the beginning to verify state. func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize) error { - switch c.LogDriver() { - case define.PassthroughLogging: + if c.LogDriver() == define.PassthroughLogging { return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot attach") } if !c.batched { @@ -690,7 +689,7 @@ func (c *Container) Sync() error { // If runtime knows about the container, update its status in runtime // And then save back to disk - if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped) { + if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopped, define.ContainerStateStopping) { oldState := c.state.State if err := c.ociRuntime.UpdateContainerStatus(c); err != nil { return err @@ -754,6 +753,9 @@ type ContainerCheckpointOptions struct { // TargetFile tells the API to read (or write) the checkpoint image // from (or to) the filename set in TargetFile TargetFile string + // CheckpointImageID tells the API to restore the container from + // checkpoint image with ID set in CheckpointImageID + CheckpointImageID string // Name tells the API that during restore from an exported // checkpoint archive a new name should be used for the // restored container @@ -781,6 +783,9 @@ type ContainerCheckpointOptions struct { // ImportPrevious tells the API to restore container with two // images. One is TargetFile, the other is ImportPrevious. ImportPrevious string + // CreateImage tells Podman to create an OCI image from container + // checkpoint in the local image store. + CreateImage string // Compression tells the API which compression to use for // the exported checkpoint archive. Compression archive.Compression @@ -794,21 +799,32 @@ type ContainerCheckpointOptions struct { // container no PID 1 will be in the namespace and that is not // possible. Pod string + // PrintStats tells the API to fill out the statistics about + // how much time each component in the stack requires to + // checkpoint a container. + PrintStats bool + // FileLocks tells the API to checkpoint/restore a container + // with file-locks + FileLocks bool } // Checkpoint checkpoints a container -func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) error { +// The return values *define.CRIUCheckpointRestoreStatistics and int64 (time +// the runtime needs to checkpoint the container) are only set if +// options.PrintStats is set to true. Not setting options.PrintStats to true +// will return nil and 0. 
+func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) (*define.CRIUCheckpointRestoreStatistics, int64, error) { logrus.Debugf("Trying to checkpoint container %s", c.ID()) if options.TargetFile != "" { if err := c.prepareCheckpointExport(); err != nil { - return err + return nil, 0, err } } if options.WithPrevious { if err := c.canWithPrevious(); err != nil { - return err + return nil, 0, err } } @@ -817,14 +833,18 @@ func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointO defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return err + return nil, 0, err } } return c.checkpoint(ctx, options) } // Restore restores a container -func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) error { +// The return values *define.CRIUCheckpointRestoreStatistics and int64 (time +// the runtime needs to restore the container) are only set if +// options.PrintStats is set to true. Not setting options.PrintStats to true +// will return nil and 0. +func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (*define.CRIUCheckpointRestoreStatistics, int64, error) { if options.Pod == "" { logrus.Debugf("Trying to restore container %s", c.ID()) } else { @@ -835,7 +855,7 @@ func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOpti defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - return err + return nil, 0, err } } defer c.newContainerEvent(events.Restore) @@ -868,7 +888,7 @@ func (c *Container) CopyFromArchive(ctx context.Context, containerPath string, c } } - return c.copyFromArchive(ctx, containerPath, chown, rename, tarStream) + return c.copyFromArchive(containerPath, chown, rename, tarStream) } // CopyToArchive copies the contents from the specified path *inside* the @@ -883,7 +903,7 @@ func (c *Container) CopyToArchive(ctx context.Context, containerPath string, tar } } - return c.copyToArchive(ctx, containerPath, tarStream) + return c.copyToArchive(containerPath, tarStream) } // Stat the specified path *inside* the container and return a file info. 
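// Editorial sketch: Checkpoint and Restore now return CRIU statistics and
// the time the runtime spent on the operation in addition to the error;
// per the comments above, both extra values stay nil/0 unless
// options.PrintStats is set. A hypothetical caller, assuming ctr and ctx
// plus imports of "context", "fmt", and the libpod packages (not part of
// this diff):
func checkpointWithStats(ctx context.Context, ctr *libpod.Container) error {
	stats, runtimeDur, err := ctr.Checkpoint(ctx, libpod.ContainerCheckpointOptions{
		PrintStats: true, // ask for per-component timing
		FileLocks:  true, // also checkpoint file locks (new option above)
	})
	if err != nil {
		return err
	}
	if stats != nil {
		fmt.Printf("runtime checkpoint time: %d, CRIU stats: %+v\n", runtimeDur, stats)
	}
	return nil
}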
@@ -906,9 +926,13 @@ func (c *Container) Stat(ctx context.Context, containerPath string) (*define.Fil if err != nil { return nil, err } - defer c.unmount(false) + defer func() { + if err := c.unmount(false); err != nil { + logrus.Errorf("Unmounting container %s: %v", c.ID(), err) + } + }() } - info, _, _, err := c.stat(ctx, mountPoint, containerPath) + info, _, _, err := c.stat(mountPoint, containerPath) return info, err } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_commit.go b/vendor/github.com/containers/podman/v4/libpod/container_commit.go similarity index 96% rename from vendor/github.com/containers/podman/v3/libpod/container_commit.go rename to vendor/github.com/containers/podman/v4/libpod/container_commit.go index 6ae225cbcc3..7018ee7d874 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_commit.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_commit.go @@ -9,9 +9,9 @@ import ( "github.com/containers/common/libimage" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - libpodutil "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + libpodutil "github.com/containers/podman/v4/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -27,6 +27,7 @@ type ContainerCommitOptions struct { Author string Message string Changes []string + Squash bool } // Commit commits the changes between a container and its image, creating a new @@ -63,6 +64,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai commitOptions := buildah.CommitOptions{ SignaturePolicyPath: options.SignaturePolicyPath, ReportWriter: options.ReportWriter, + Squash: options.Squash, SystemContext: c.runtime.imageContext, PreferredManifestType: options.PreferredManifestType, } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_config.go b/vendor/github.com/containers/podman/v4/libpod/container_config.go similarity index 82% rename from vendor/github.com/containers/podman/v3/libpod/container_config.go rename to vendor/github.com/containers/podman/v4/libpod/container_config.go index 54d102a713e..371a1dec083 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_config.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_config.go @@ -4,10 +4,11 @@ import ( "net" "time" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/namespaces" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/specgen" "github.com/containers/storage" spec "github.com/opencontainers/runtime-spec/specs-go" ) @@ -78,6 +79,11 @@ type ContainerConfig struct { // These containers must be started before this container is started. Dependencies []string + // rewrite is an internal bool to indicate that the config was modified after + // a read from the db, e.g. to migrate config fields after an upgrade. + // This field should never be written to the db, the json tag ensures this. 
+ rewrite bool `json:"-"` + // embedded sub-configs ContainerRootFSConfig ContainerSecurityConfig @@ -114,6 +120,10 @@ type ContainerRootFSConfig struct { // with the size specified in ShmSize and populate this with the path of // said tmpfs. ShmDir string `json:"ShmDir,omitempty"` + // NoShmShare indicates whether /dev/shm can be shared with other containers + NoShmShare bool `json:"NOShmShare,omitempty"` + // NoShm indicates whether a tmpfs should be created and mounted on /dev/shm + NoShm bool `json:"NoShm,omitempty"` // ShmSize is the size of the container's SHM. Only used if ShmDir was // not set manually at time of creation. ShmSize int64 `json:"shmSize"` @@ -153,9 +163,17 @@ type ContainerRootFSConfig struct { Secrets []*ContainerSecret `json:"secrets,omitempty"` // SecretPath is the secrets location in storage SecretsPath string `json:"secretsPath"` + // StorageOpts to be used when creating rootfs + StorageOpts map[string]string `json:"storageOpts"` // Volatile specifies whether the container storage can be optimized // at the cost of not syncing all the dirty files in memory. Volatile bool `json:"volatile,omitempty"` + // Passwd allows the user to override podman's passwd/group file setup + Passwd *bool `json:"passwd,omitempty"` + // ChrootDirs is an additional set of directories that need to be + // treated as root directories. Standard bind mounts will be mounted + // into paths relative to these directories. + ChrootDirs []string `json:"chroot_directories,omitempty"` } // ContainerSecurityConfig is an embedded sub-config providing security configuration @@ -189,6 +207,8 @@ type ContainerSecurityConfig struct { // Groups are additional groups to add the container's user to. These // are resolved within the container using the container's /etc/passwd. Groups []string `json:"groups,omitempty"` + // HostUsers is a list of host user accounts to add to /etc/passwd + HostUsers []string `json:"HostUsers,omitempty"` // AddCurrentUserPasswdEntry indicates that Libpod should ensure that // the container's /etc/passwd contains an entry for the user running // Libpod - mostly used in rootless containers where the user running @@ -222,15 +242,22 @@ type ContainerNetworkConfig struct { // StaticIP is a static IP to request for the container. // This cannot be set unless CreateNetNS is set. // If not set, the container will be dynamically assigned an IP by CNI. + // Deprecated: Do not use this anymore, this is only for DB backwards compat. StaticIP net.IP `json:"staticIP"` // StaticMAC is a static MAC to request for the container. // This cannot be set unless CreateNetNS is set. // If not set, the container will be dynamically assigned a MAC by CNI. - StaticMAC net.HardwareAddr `json:"staticMAC"` + // Deprecated: Do not use this anymore, this is only for DB backwards compat. + StaticMAC types.HardwareAddr `json:"staticMAC"` // PortMappings are the ports forwarded to the container's network // namespace // These are not used unless CreateNetNS is true - PortMappings []types.OCICNIPortMapping `json:"portMappings,omitempty"` + PortMappings []types.PortMapping `json:"newPortMappings,omitempty"` + // OldPortMappings are the ports forwarded to the container's network + // namespace. As of podman 4.0 this field is deprecated, use PortMappings + // instead. The db will convert the old ports to the new structure for you.
+ // These are not used unless CreateNetNS is true + OldPortMappings []types.OCICNIPortMapping `json:"portMappings,omitempty"` // ExposedPorts are the ports which are exposed but not forwarded // into the container. // The map key is the port and the string slice contains the protocols, @@ -257,24 +284,24 @@ type ContainerNetworkConfig struct { // Hosts to add in container // Will be appended to host's host file HostAdd []string `json:"hostsAdd,omitempty"` - // Network names (CNI) to add container to. Empty to use default network. + // Network names with the network specific options. + // Please note that these can be altered at runtime. The actual list is + // stored in the DB and should be retrieved from there via c.networks(); + // this value is only used for container create. + // Added in podman 4.0, previously NetworksDeprecated was used. Make + // sure to not change the json tags. + Networks map[string]types.PerNetworkOptions `json:"newNetworks,omitempty"` + // Network names to add container to. Empty to use default network. // Please note that these can be altered at runtime. The actual list is // stored in the DB and should be retrieved from there; this is only the // set of networks the container was *created* with. - Networks []string `json:"networks,omitempty"` + // Deprecated: Do not use this anymore, this is only for DB backwards compat. + // Also note that we need to keep the old json tag to decode from DB correctly + NetworksDeprecated []string `json:"networks,omitempty"` // Network mode specified for the default network. NetMode namespaces.NetworkMode `json:"networkMode,omitempty"` // NetworkOptions are additional options for each network NetworkOptions map[string][]string `json:"network_options,omitempty"` - // NetworkAliases are aliases that will be added to each network. - // These are additional names that this container can be accessed as via - // DNS when the CNI dnsname plugin is in use. - // Please note that these can be altered at runtime. As such, the actual - // list is stored in the database and should be retrieved from there; - // this is only the set of aliases the container was *created with*. - // Formatted as map of network name to aliases. All network names must - // be present in the Networks list above. - NetworkAliases map[string][]string `json:"network_alises,omitempty"` } // ContainerImageConfig is an embedded sub-config providing image configuration @@ -316,7 +343,7 @@ type ContainerMiscConfig struct { // CgroupManager is the cgroup manager used to create this container. // If empty, the runtime default will be used. CgroupManager string `json:"cgroupManager,omitempty"` - // NoCgroups indicates that the container will not create CGroups. It is + // NoCgroups indicates that the container will not create Cgroups. It is // incompatible with CgroupParent. Deprecated in favor of CgroupsMode. NoCgroups bool `json:"noCgroups,omitempty"` // CgroupsMode indicates how the container will create cgroups @@ -352,20 +379,13 @@ type ContainerMiscConfig struct { PostConfigureNetNS bool `json:"postConfigureNetNS"` // OCIRuntime used to create the container OCIRuntime string `json:"runtime,omitempty"` - // ExitCommand is the container's exit command. - // This Command will be executed when the container exits by Conmon. - // It is usually used to invoke post-run cleanup - for example, in - // Podman, it invokes `podman container cleanup`, which in turn calls - // Libpod's Cleanup() API to unmount the container and clean up its - // network.
- ExitCommand []string `json:"exitCommand,omitempty"` // IsInfra is a bool indicating whether this container is an infra container used for // sharing kernel namespaces in a pod IsInfra bool `json:"pause"` // SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed SdNotifyMode string `json:"sdnotifyMode,omitempty"` - // Systemd tells libpod to setup the container in systemd mode - Systemd bool `json:"systemd"` + // Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false + Systemd *bool `json:"systemd,omitempty"` // HealthCheckConfig has the health check command and related timings HealthCheckConfig *manifest.Schema2HealthConfig `json:"healthcheck"` // PreserveFDs is a number of additional file descriptors (in addition @@ -388,4 +408,23 @@ type ContainerMiscConfig struct { // InitContainerType specifies if the container is an initcontainer // and if so, what type: always or once are possible non-nil entries InitContainerType string `json:"init_container_type,omitempty"` + // PasswdEntry specifies arbitrary data to append to a file. + PasswdEntry string `json:"passwd_entry,omitempty"` +} + +// InfraInherit contains the compatible options inheritable from the infra container +type InfraInherit struct { + ApparmorProfile string `json:"apparmor_profile,omitempty"` + CapAdd []string `json:"cap_add,omitempty"` + CapDrop []string `json:"cap_drop,omitempty"` + HostDeviceList []spec.LinuxDevice `json:"host_device_list,omitempty"` + ImageVolumes []*specgen.ImageVolume `json:"image_volumes,omitempty"` + InfraResources *spec.LinuxResources `json:"resource_limits,omitempty"` + Mounts []spec.Mount `json:"mounts,omitempty"` + NoNewPrivileges bool `json:"no_new_privileges,omitempty"` + OverlayVolumes []*specgen.OverlayVolume `json:"overlay_volumes,omitempty"` + SeccompPolicy string `json:"seccomp_policy,omitempty"` + SeccompProfilePath string `json:"seccomp_profile_path,omitempty"` + SelinuxOpts []string `json:"selinux_opts,omitempty"` + Volumes []*specgen.NamedVolume `json:"volumes,omitempty"` } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_copy_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_copy_linux.go similarity index 88% rename from vendor/github.com/containers/podman/v3/libpod/container_copy_linux.go rename to vendor/github.com/containers/podman/v4/libpod/container_copy_linux.go index 7d4dd0d4693..7566fbb125e 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_copy_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_copy_linux.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package libpod import ( - "context" "io" "os" "path/filepath" @@ -13,17 +13,17 @@ import ( buildahCopiah "github.com/containers/buildah/copier" "github.com/containers/buildah/pkg/chrootuser" "github.com/containers/buildah/util" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" - "github.com/docker/docker/pkg/archive" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) -func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool, rename map[string]string, reader io.Reader) (func() error, error) { +func (c *Container) copyFromArchive(path string, chown bool, rename 
map[string]string, reader io.Reader) (func() error, error) { var ( mountPoint string resolvedRoot string @@ -48,7 +48,11 @@ func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool if err != nil { return nil, err } - unmount = func() { c.unmount(false) } + unmount = func() { + if err := c.unmount(false); err != nil { + logrus.Errorf("Failed to unmount container: %v", err) + } + } } if c.state.State == define.ContainerStateRunning { @@ -92,7 +96,7 @@ func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool Rename: rename, } - return c.joinMountAndExec(ctx, + return c.joinMountAndExec( func() error { return buildahCopiah.Put(resolvedRoot, resolvedPath, putOptions, decompressed) }, @@ -100,7 +104,7 @@ func (c *Container) copyFromArchive(ctx context.Context, path string, chown bool }, nil } -func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Writer) (func() error, error) { +func (c *Container) copyToArchive(path string, writer io.Writer) (func() error, error) { var ( mountPoint string unmount func() @@ -117,10 +121,14 @@ func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Wr if err != nil { return nil, err } - unmount = func() { c.unmount(false) } + unmount = func() { + if err := c.unmount(false); err != nil { + logrus.Errorf("Failed to unmount container: %v", err) + } + } } - statInfo, resolvedRoot, resolvedPath, err := c.stat(ctx, mountPoint, path) + statInfo, resolvedRoot, resolvedPath, err := c.stat(mountPoint, path) if err != nil { unmount() return nil, err @@ -164,7 +172,7 @@ func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Wr // container's user namespace. IgnoreUnreadable: rootless.IsRootless() && c.state.State == define.ContainerStateRunning, } - return c.joinMountAndExec(ctx, + return c.joinMountAndExec( func() error { return buildahCopiah.Get(resolvedRoot, "", getOptions, []string{resolvedPath}, writer) }, @@ -215,7 +223,7 @@ func idtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxI // container's file system. // // Note, if the container is not running `f()` will be executed as is. -func (c *Container) joinMountAndExec(ctx context.Context, f func() error) error { +func (c *Container) joinMountAndExec(f func() error) error { if c.state.State != define.ContainerStateRunning { return f() } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_exec.go b/vendor/github.com/containers/podman/v4/libpod/container_exec.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/container_exec.go rename to vendor/github.com/containers/podman/v4/libpod/container_exec.go index f99fb7d3f00..c05e7fd94d8 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_exec.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_exec.go @@ -9,11 +9,12 @@ import ( "strconv" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" "github.com/containers/storage/pkg/stringid" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // ExecConfig contains the configuration of an exec session @@ -78,11 +79,11 @@ type ExecConfig struct { type ExecSession struct { // Id is the ID of the exec session. // Named somewhat strangely to not conflict with ID(). 
- // nolint:stylecheck,golint + // nolint:stylecheck,revive Id string `json:"id"` // ContainerId is the ID of the container this exec session belongs to. // Named somewhat strangely to not conflict with ContainerID(). - // nolint:stylecheck,golint + // nolint:stylecheck,revive ContainerId string `json:"containerId"` // State is the state of the exec session. @@ -340,22 +341,60 @@ func (c *Container) ExecStartAndAttach(sessionID string, streams *define.AttachS } lastErr = tmpErr - exitCode, err := c.readExecExitCode(session.ID()) - if err != nil { + exitCode, exitCodeErr := c.readExecExitCode(session.ID()) + + // Lock again. + // Important: we must lock and sync *before* the above error is handled. + // We need info from the database to handle the error. + if !c.batched { + c.lock.Lock() + } + // We can't reuse the old exec session (things may have changed from + // other use, the container was unlocked). + // So re-sync and get a fresh copy. + // If we can't do this, no point in continuing, any attempt to save + // would write garbage to the DB. + if err := c.syncContainer(); err != nil { + if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { + // We can't save status, but since the container has + // been entirely removed, we don't have to; exit cleanly + return lastErr + } if lastErr != nil { logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) } - lastErr = err - } + return errors.Wrapf(err, "error syncing container %s state to update exec session %s", c.ID(), sessionID) + } + + // Now handle the error from readExecExitCode above. + if exitCodeErr != nil { + newSess, ok := c.state.ExecSessions[sessionID] + if !ok { + // The exec session was removed entirely, probably by + // the cleanup process. When it did so, it should have + // written an event with the exit code. + // Given that, there's nothing more we can do. + logrus.Infof("Container %s exec session %s already removed", c.ID(), session.ID()) + return lastErr + } - logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode) + if newSess.State == define.ExecStateStopped { + // Exec session already cleaned up. + // Exit code should be recorded, so it's OK if we were + // not able to read it. + logrus.Infof("Container %s exec session %s already cleaned up", c.ID(), session.ID()) + return lastErr + } - // Lock again - if !c.batched { - c.lock.Lock() + if lastErr != nil { + logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) + } + lastErr = exitCodeErr } - if err := writeExecExitCode(c, session.ID(), exitCode); err != nil { + logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode) + + if err := justWriteExecExitCode(c, session.ID(), exitCode); err != nil { if lastErr != nil { logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr) } @@ -774,13 +813,40 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi return exitCode, nil } -// cleanup an exec session after its done -func (c *Container) cleanupExecBundle(sessionID string) error { - if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) { - return err +// cleanupExecBundle cleans up an exec session after it is done +// Please be careful when using this function since it might temporarily unlock +// the container when os.RemoveAll($bundlePath) fails with ENOTEMPTY or EBUSY +// errors.
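Note: the function that follows retries os.RemoveAll because the exec bundle directory can transiently fail with EBUSY or ENOTEMPTY while other processes still use it. A standalone sketch of the same bounded-retry pattern, without the container lock handling (all names illustrative):

package main

import (
	"errors"
	"fmt"
	"os"
	"time"

	"golang.org/x/sys/unix"
)

// removeAllWithRetry retries removal on transient EBUSY/ENOTEMPTY errors,
// sleeping briefly between attempts, and gives up after 50 tries.
func removeAllWithRetry(path string) error {
	var err error
	for attempts := 0; attempts < 50; attempts++ {
		err = os.RemoveAll(path)
		if err == nil || os.IsNotExist(err) {
			return nil
		}
		var pathErr *os.PathError
		if errors.As(err, &pathErr) &&
			(errors.Is(pathErr.Err, unix.ENOTEMPTY) || errors.Is(pathErr.Err, unix.EBUSY)) {
			// Another process is still using the directory; back off and retry.
			time.Sleep(100 * time.Millisecond)
			continue
		}
		return err
	}
	return fmt.Errorf("removing %s: %w", path, err)
}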
+func (c *Container) cleanupExecBundle(sessionID string) (err error) { + path := c.execBundlePath(sessionID) + for attempts := 0; attempts < 50; attempts++ { + err = os.RemoveAll(path) + if err == nil || os.IsNotExist(err) { + return nil + } + if pathErr, ok := err.(*os.PathError); ok { + err = pathErr.Err + if errors.Cause(err) == unix.ENOTEMPTY || errors.Cause(err) == unix.EBUSY { + // give other processes a chance to use the container + if !c.batched { + if err := c.save(); err != nil { + return err + } + c.lock.Unlock() + } + time.Sleep(time.Millisecond * 100) + if !c.batched { + c.lock.Lock() + if err := c.syncContainer(); err != nil { + return err + } + } + continue + } + } + return } - - return nil + return } // the path to a containers exec session bundle diff --git a/vendor/github.com/containers/podman/v3/libpod/container_graph.go b/vendor/github.com/containers/podman/v4/libpod/container_graph.go similarity index 99% rename from vendor/github.com/containers/podman/v3/libpod/container_graph.go rename to vendor/github.com/containers/podman/v4/libpod/container_graph.go index 32fb264f162..eeb0f02fa46 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_graph.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_graph.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/container_inspect.go b/vendor/github.com/containers/podman/v4/libpod/container_inspect.go similarity index 86% rename from vendor/github.com/containers/podman/v3/libpod/container_inspect.go rename to vendor/github.com/containers/podman/v4/libpod/container_inspect.go index 277c3b96069..76e0e9e1324 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_inspect.go @@ -6,9 +6,10 @@ import ( "strings" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/driver" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/driver" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/storage/types" units "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -50,6 +51,17 @@ func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) { return c.inspectLocked(size) } +func (c *Container) volumesFrom() ([]string, error) { + ctrSpec, err := c.specFromState() + if err != nil { + return nil, err + } + if ctrs, ok := ctrSpec.Annotations[define.InspectAnnotationVolumesFrom]; ok { + return strings.Split(ctrs, ","), nil + } + return nil, nil +} + func (c *Container) getContainerInspectData(size bool, driverData *define.DriverData) (*define.InspectContainerData, error) { config := c.config runtimeInfo := c.state @@ -91,35 +103,51 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver } } - namedVolumes, mounts := c.sortUserVolumes(ctrSpec) - inspectMounts, err := c.GetInspectMounts(namedVolumes, c.config.ImageVolumes, mounts) + namedVolumes, mounts := c.SortUserVolumes(ctrSpec) + inspectMounts, err := c.GetMounts(namedVolumes, c.config.ImageVolumes, mounts) if err != nil { return nil, err } + cgroupPath, err := 
c.cGroupPath() + if err != nil { + // Handle the case where the container is not running or has no cgroup. + if errors.Is(err, define.ErrNoCgroups) || errors.Is(err, define.ErrCtrStopped) { + cgroupPath = "" + } else { + return nil, err + } + } + data := &define.InspectContainerData{ ID: config.ID, Created: config.CreatedTime, Path: path, Args: args, State: &define.InspectContainerState{ - OciVersion: ctrSpec.Version, - Status: runtimeInfo.State.String(), - Running: runtimeInfo.State == define.ContainerStateRunning, - Paused: runtimeInfo.State == define.ContainerStatePaused, - OOMKilled: runtimeInfo.OOMKilled, - Dead: runtimeInfo.State.String() == "bad state", - Pid: runtimeInfo.PID, - ConmonPid: runtimeInfo.ConmonPID, - ExitCode: runtimeInfo.ExitCode, - Error: "", // can't get yet - StartedAt: runtimeInfo.StartedTime, - FinishedAt: runtimeInfo.FinishedTime, - Checkpointed: runtimeInfo.Checkpointed, + OciVersion: ctrSpec.Version, + Status: runtimeInfo.State.String(), + Running: runtimeInfo.State == define.ContainerStateRunning, + Paused: runtimeInfo.State == define.ContainerStatePaused, + OOMKilled: runtimeInfo.OOMKilled, + Dead: runtimeInfo.State.String() == "bad state", + Pid: runtimeInfo.PID, + ConmonPid: runtimeInfo.ConmonPID, + ExitCode: runtimeInfo.ExitCode, + Error: "", // can't get yet + StartedAt: runtimeInfo.StartedTime, + FinishedAt: runtimeInfo.FinishedTime, + Checkpointed: runtimeInfo.Checkpointed, + CgroupPath: cgroupPath, + RestoredAt: runtimeInfo.RestoredTime, + CheckpointedAt: runtimeInfo.CheckpointedTime, + Restored: runtimeInfo.Restored, + CheckpointPath: runtimeInfo.CheckpointPath, + CheckpointLog: runtimeInfo.CheckpointLog, + RestoreLog: runtimeInfo.RestoreLog, }, Image: config.RootfsImageID, ImageName: config.RootfsImageName, - ExitCommand: config.ExitCommand, Namespace: config.Namespace, Rootfs: config.Rootfs, Pod: config.Pod, @@ -194,7 +222,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver // Get inspect-formatted mounts list. // Only includes user-specified mounts. Only includes bind mounts and named // volumes, not tmpfs volumes. 
-func (c *Container) GetInspectMounts(namedVolumes []*ContainerNamedVolume, imageVolumes []*ContainerImageVolume, mounts []spec.Mount) ([]define.InspectMount, error) { +func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes []*ContainerImageVolume, mounts []spec.Mount) ([]define.InspectMount, error) { inspectMounts := []define.InspectMount{} // No mounts, return early @@ -257,6 +285,27 @@ func (c *Container) GetInspectMounts(namedVolumes []*ContainerNamedVolume, image return inspectMounts, nil } +// GetSecurityOptions retrieves and returns the security related annotations and process information upon inspection +func (c *Container) GetSecurityOptions() []string { + ctrSpec := c.config.Spec + SecurityOpt := []string{} + if ctrSpec.Process != nil { + if ctrSpec.Process.NoNewPrivileges { + SecurityOpt = append(SecurityOpt, "no-new-privileges") + } + } + if label, ok := ctrSpec.Annotations[define.InspectAnnotationLabel]; ok { + SecurityOpt = append(SecurityOpt, fmt.Sprintf("label=%s", label)) + } + if seccomp, ok := ctrSpec.Annotations[define.InspectAnnotationSeccomp]; ok { + SecurityOpt = append(SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp)) + } + if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok { + SecurityOpt = append(SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor)) + } + return SecurityOpt +} + // Parse mount options so we can populate them in the mount structure. // The mount passed in will be modified. func parseMountOptionsForInspect(options []string, mount *define.InspectMount) { @@ -300,8 +349,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.User = c.config.User if spec.Process != nil { ctrConfig.Tty = spec.Process.Terminal - ctrConfig.Env = []string{} - ctrConfig.Env = append(ctrConfig.Env, spec.Process.Env...) + ctrConfig.Env = append([]string{}, spec.Process.Env...) ctrConfig.WorkingDir = spec.Process.Cwd } @@ -309,7 +357,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.Timeout = c.config.Timeout ctrConfig.OpenStdin = c.config.Stdin ctrConfig.Image = c.config.RootfsImageName - ctrConfig.SystemdMode = c.config.Systemd + ctrConfig.SystemdMode = c.Systemd() // Leave empty is not explicitly overwritten by user if len(c.config.Command) != 0 { @@ -362,9 +410,23 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.Umask = c.config.Umask } + ctrConfig.Passwd = c.config.Passwd + ctrConfig.ChrootDirs = append(ctrConfig.ChrootDirs, c.config.ChrootDirs...) + return ctrConfig } +func generateIDMappings(idMappings types.IDMappingOptions) *define.InspectIDMappings { + var inspectMappings define.InspectIDMappings + for _, uid := range idMappings.UIDMap { + inspectMappings.UIDMap = append(inspectMappings.UIDMap, fmt.Sprintf("%d:%d:%d", uid.ContainerID, uid.HostID, uid.Size)) + } + for _, gid := range idMappings.GIDMap { + inspectMappings.GIDMap = append(inspectMappings.GIDMap, fmt.Sprintf("%d:%d:%d", gid.ContainerID, gid.HostID, gid.Size)) + } + return &inspectMappings +} + // Generate the InspectContainerHostConfig struct for the HostConfig field of // Inspect. 
func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) (*define.InspectContainerHostConfig, error) { @@ -405,16 +467,14 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named hostConfig.GroupAdd = make([]string, 0, len(c.config.Groups)) hostConfig.GroupAdd = append(hostConfig.GroupAdd, c.config.Groups...) - hostConfig.SecurityOpt = []string{} if ctrSpec.Process != nil { if ctrSpec.Process.OOMScoreAdj != nil { hostConfig.OomScoreAdj = *ctrSpec.Process.OOMScoreAdj } - if ctrSpec.Process.NoNewPrivileges { - hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, "no-new-privileges") - } } + hostConfig.SecurityOpt = c.GetSecurityOptions() + hostConfig.ReadonlyRootfs = ctrSpec.Root.Readonly hostConfig.ShmSize = c.config.ShmSize hostConfig.Runtime = "oci" @@ -439,15 +499,6 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named if ctrSpec.Annotations[define.InspectAnnotationInit] == define.InspectResponseTrue { hostConfig.Init = true } - if label, ok := ctrSpec.Annotations[define.InspectAnnotationLabel]; ok { - hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("label=%s", label)) - } - if seccomp, ok := ctrSpec.Annotations[define.InspectAnnotationSeccomp]; ok { - hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp)) - } - if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok { - hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor)) - } } // Resource limits @@ -476,9 +527,6 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named if ctrSpec.Linux.Resources.Memory.Limit != nil { hostConfig.Memory = *ctrSpec.Linux.Resources.Memory.Limit } - if ctrSpec.Linux.Resources.Memory.Kernel != nil { - hostConfig.KernelMemory = *ctrSpec.Linux.Resources.Memory.Kernel - } if ctrSpec.Linux.Resources.Memory.Reservation != nil { hostConfig.MemoryReservation = *ctrSpec.Linux.Resources.Memory.Reservation } @@ -655,32 +703,31 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named } hostConfig.CapAdd = capAdd hostConfig.CapDrop = capDrop - - // IPC Namespace mode - ipcMode := "" - if c.config.IPCNsCtr != "" { - ipcMode = fmt.Sprintf("container:%s", c.config.IPCNsCtr) - } else if ctrSpec.Linux != nil { + switch { + case c.config.IPCNsCtr != "": + hostConfig.IpcMode = fmt.Sprintf("container:%s", c.config.IPCNsCtr) + case ctrSpec.Linux != nil: // Locate the spec's IPC namespace. // If there is none, it's ipc=host. // If there is one and it has a path, it's "ns:". // If no path, it's default - the empty string. - for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.IPCNamespace { if ns.Path != "" { - ipcMode = fmt.Sprintf("ns:%s", ns.Path) + hostConfig.IpcMode = fmt.Sprintf("ns:%s", ns.Path) } else { - ipcMode = "private" + break } - break } } - if ipcMode == "" { - ipcMode = "host" - } + case c.config.NoShm: + hostConfig.IpcMode = "none" + case c.config.NoShmShare: + hostConfig.IpcMode = "private" + } + if hostConfig.IpcMode == "" { + hostConfig.IpcMode = "shareable" } - hostConfig.IpcMode = ipcMode // Cgroup namespace mode cgroupMode := "" @@ -706,7 +753,7 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named } hostConfig.CgroupMode = cgroupMode - // CGroup parent + // Cgroup parent // Need to check if it's the default, and not print if so. 
defaultCgroupParent := "" switch c.CgroupManager() { @@ -791,12 +838,14 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named } } hostConfig.UsernsMode = usernsMode - + if c.config.IDMappings.UIDMap != nil && c.config.IDMappings.GIDMap != nil { + hostConfig.IDMappings = generateIDMappings(c.config.IDMappings) + } // Devices // Do not include if privileged - assumed that all devices will be // included. var err error - hostConfig.Devices, err = c.GetDevices(*&hostConfig.Privileged, *ctrSpec, deviceNodes) + hostConfig.Devices, err = c.GetDevices(hostConfig.Privileged, *ctrSpec, deviceNodes) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_internal.go b/vendor/github.com/containers/podman/v4/libpod/container_internal.go similarity index 87% rename from vendor/github.com/containers/podman/v3/libpod/container_internal.go rename to vendor/github.com/containers/podman/v4/libpod/container_internal.go index 3f9738411e8..5c6719bdf3b 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_internal.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_internal.go @@ -1,7 +1,6 @@ package libpod import ( - "bufio" "bytes" "context" "fmt" @@ -17,18 +16,23 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/pkg/overlay" butil "github.com/containers/buildah/util" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/ctime" - "github.com/containers/podman/v3/pkg/hooks" - "github.com/containers/podman/v3/pkg/hooks/exec" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/selinux" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/common/pkg/chown" + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/ctime" + "github.com/containers/podman/v4/pkg/hooks" + "github.com/containers/podman/v4/pkg/hooks/exec" + "github.com/containers/podman/v4/pkg/lookup" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/selinux" + "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/mount" "github.com/coreos/go-systemd/v22/daemon" securejoin "github.com/cyphar/filepath-securejoin" @@ -37,6 +41,7 @@ import ( "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) const ( @@ -96,15 +101,8 @@ func (c *Container) rootFsSize() (int64, error) { // rwSize gets the size of the mutable top layer of the container. 
func (c *Container) rwSize() (int64, error) { if c.config.Rootfs != "" { - var size int64 - err := filepath.Walk(c.config.Rootfs, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - size += info.Size() - return nil - }) - return size, err + size, err := util.SizeOfPath(c.config.Rootfs) + return int64(size), err } container, err := c.runtime.store.Container(c.ID()) @@ -136,6 +134,11 @@ func (c *Container) ControlSocketPath() string { return filepath.Join(c.bundlePath(), "ctl") } +// CheckpointVolumesPath returns the path to the directory containing the checkpointed volumes +func (c *Container) CheckpointVolumesPath() string { + return filepath.Join(c.bundlePath(), metadata.CheckpointVolumesDirectory) +} + // CheckpointPath returns the path to the directory containing the checkpoint func (c *Container) CheckpointPath() string { return filepath.Join(c.bundlePath(), metadata.CheckpointDirectory) @@ -263,7 +266,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { return false, nil } else if c.state.State == define.ContainerStateUnknown { - return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!") + return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt") } c.newContainerEvent(events.Restart) @@ -288,7 +291,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err // setup slirp4netns again because slirp4netns will die when conmon exits if c.config.NetMode.IsSlirp4netns() { - err := c.runtime.setupSlirp4netns(c) + err := c.runtime.setupSlirp4netns(c, c.state.NetNS) if err != nil { return false, err } @@ -297,7 +300,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err // setup rootlesskit port forwarder again since it dies when conmon exits // we use rootlesskit port forwarder only as rootless and when bridge network is used if rootless.IsRootless() && c.config.NetMode.IsBridge() && len(c.config.PortMappings) > 0 { - err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS.Path()) + err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS.Path(), c.state.NetworkStatus) if err != nil { return false, err } @@ -443,10 +446,15 @@ func (c *Container) setupStorage(ctx context.Context) error { }, LabelOpts: c.config.LabelOpts, } - if c.restoreFromCheckpoint && !c.config.Privileged { - // If restoring from a checkpoint, the root file-system - // needs to be mounted with the same SELinux labels as - // it was mounted previously. + + options.StorageOpt = c.config.StorageOpts + + if c.restoreFromCheckpoint && c.config.ProcessLabel != "" && c.config.MountLabel != "" { + // If restoring from a checkpoint, the root file-system needs + // to be mounted with the same SELinux labels as it was mounted + // previously. But only if both labels have been set. For + // privileged containers or '--ipc host' only ProcessLabel will + // be set and so we will skip it for cases like that. 
if options.Flags == nil { options.Flags = make(map[string]interface{}) } @@ -480,13 +488,35 @@ func (c *Container) setupStorage(ctx context.Context) error { c.setupStorageMapping(&options.IDMappingOptions, &c.config.IDMappings) - containerInfo, err := c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options) - if err != nil { - return errors.Wrapf(err, "error creating container storage") + // Unless the user has specified a name, use a randomly generated one. + // Note that name conflicts may occur (see #11735), so we need to loop. + generateName := c.config.Name == "" + var containerInfo ContainerInfo + var containerInfoErr error + for { + if generateName { + name, err := c.runtime.generateName() + if err != nil { + return err + } + c.config.Name = name + } + containerInfo, containerInfoErr = c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options) + + if !generateName || errors.Cause(containerInfoErr) != storage.ErrDuplicateName { + break + } + } + if containerInfoErr != nil { + return errors.Wrapf(containerInfoErr, "error creating container storage") } - c.config.IDMappings.UIDMap = containerInfo.UIDMap - c.config.IDMappings.GIDMap = containerInfo.GIDMap + // only reconfigure IDMappings if the layer was mounted from storage; + // if it is an external overlay, do not reset IDMappings + if !c.config.RootfsOverlay { + c.config.IDMappings.UIDMap = containerInfo.UIDMap + c.config.IDMappings.GIDMap = containerInfo.GIDMap + } processLabel, err := c.processLabel(containerInfo.ProcessLabel) if err != nil { @@ -527,7 +557,7 @@ func (c *Container) setupStorage(ctx context.Context) error { } func (c *Container) processLabel(processLabel string) (string, error) { - if !c.config.Systemd && !c.ociRuntime.SupportsKVM() { + if !c.Systemd() && !c.ociRuntime.SupportsKVM() { return processLabel, nil } ctrSpec, err := c.specFromState() @@ -539,7 +569,7 @@ func (c *Container) processLabel(processLabel string) (string, error) { switch { case c.ociRuntime.SupportsKVM(): return selinux.KVMLabel(processLabel) - case c.config.Systemd: + case c.Systemd(): return selinux.InitLabel(processLabel) } } @@ -595,6 +625,12 @@ func resetState(state *ContainerState) { state.RestartPolicyMatch = false state.RestartCount = 0 state.Checkpointed = false + state.Restored = false + state.CheckpointedTime = time.Time{} + state.RestoredTime = time.Time{} + state.CheckpointPath = "" + state.CheckpointLog = "" + state.RestoreLog = "" } // Refresh refreshes the container's state after a restart. @@ -647,6 +683,19 @@ func (c *Container) refresh() error { c.state.NetworkStatus = nil c.state.NetworkStatusOld = nil + // Rewrite the config if necessary. + // Podman 4.0 uses a new port format in the config. + // getContainerConfigFromDB() already converted the old ports to the new one + // but it did not write the config back to the db for performance reasons. + // If a rewrite must happen, the config.rewrite field is set to true. + if c.config.rewrite { + // SafeRewriteContainerConfig must be used with care. Make sure to not change config fields by accident.
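Note: the call that follows is the write-back half of a lazy config migration: getContainerConfigFromDB() converts legacy fields on read and only sets the in-memory rewrite flag, and refresh() persists the converted config here. A generic sketch of that pattern with invented field names, not podman's actual types:

// Illustrative only: keep the legacy JSON tag readable, migrate on load,
// and flag the config so the next save persists the new layout.
type containerConfig struct {
	OldPorts []string `json:"portMappings,omitempty"`    // legacy on-disk form
	Ports    []string `json:"newPortMappings,omitempty"` // current form
	rewrite  bool     // in-memory only, never serialized
}

func (c *containerConfig) migrateOnLoad() {
	if len(c.OldPorts) > 0 && len(c.Ports) == 0 {
		c.Ports = append(c.Ports, c.OldPorts...) // convert legacy data
		c.rewrite = true                         // write back lazily, not on every read
	}
}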
+ if err := c.runtime.state.SafeRewriteContainerConfig(c, "", "", c.config); err != nil { + return errors.Wrapf(err, "failed to rewrite the config for container %s", c.config.ID) + } + c.config.rewrite = false + } + if err := c.save(); err != nil { return errors.Wrapf(err, "error refreshing state for container %s", c.ID()) } @@ -704,7 +753,7 @@ func (c *Container) export(path string) error { if !c.state.Mounted { containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel) if err != nil { - return errors.Wrapf(err, "error mounting container %q", c.ID()) + return errors.Wrapf(err, "mounting container %q", c.ID()) } mountPoint = containerMount defer func() { @@ -948,9 +997,6 @@ func (c *Container) completeNetworkSetup() error { if err := c.syncContainer(); err != nil { return err } - if c.config.NetMode.IsSlirp4netns() { - return c.runtime.setupSlirp4netns(c) - } if err := c.runtime.setupNetNS(c); err != nil { return err } @@ -962,17 +1008,14 @@ func (c *Container) completeNetworkSetup() error { } } // check if we have a bindmount for /etc/hosts - if hostsBindMount, ok := state.BindMounts["/etc/hosts"]; ok && len(c.cniHosts()) > 0 { - ctrHostPath := filepath.Join(c.state.RunDir, "hosts") - if hostsBindMount == ctrHostPath { - // read the existing hosts - b, err := ioutil.ReadFile(hostsBindMount) - if err != nil { - return err - } - if err := ioutil.WriteFile(hostsBindMount, append(b, []byte(c.cniHosts())...), 0644); err != nil { - return err - } + if hostsBindMount, ok := state.BindMounts[config.DefaultHostsFile]; ok { + entries, err := c.getHostsEntries() + if err != nil { + return err + } + // add new container ips to the hosts file + if err := etchosts.Add(hostsBindMount, entries); err != nil { + return err } } @@ -997,18 +1040,6 @@ func (c *Container) completeNetworkSetup() error { return ioutil.WriteFile(resolvBindMount, []byte(strings.Join(outResolvConf, "\n")), 0644) } -func (c *Container) cniHosts() string { - var hosts string - for _, status := range c.getNetworkStatus() { - for _, netInt := range status.Interfaces { - for _, netAddress := range netInt.Networks { - hosts += fmt.Sprintf("%s\t%s %s\n", netAddress.Subnet.IP.String(), c.Hostname(), c.config.Name) - } - } - } - return hosts -} - // Initialize a container, creating it in the runtime func (c *Container) init(ctx context.Context, retainRetries bool) error { // Unconditionally remove conmon temporary files. @@ -1040,14 +1071,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { } // With the spec complete, do an OCI create - if err := c.ociRuntime.CreateContainer(c, nil); err != nil { - // Fedora 31 is carrying a patch to display improved error - // messages to better handle the V2 transition. This is NOT - // upstream in any OCI runtime. 
- // TODO: Remove once runc supports cgroupsv2 - if strings.Contains(err.Error(), "this version of runc doesn't work on cgroups v2") { - logrus.Errorf("Oci runtime %q does not support CGroups V2: use system migrate to mitigate", c.ociRuntime.Name()) - } + if _, err = c.ociRuntime.CreateContainer(c, nil); err != nil { return err } @@ -1062,6 +1086,12 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { } c.state.Checkpointed = false + c.state.Restored = false + c.state.CheckpointedTime = time.Time{} + c.state.RestoredTime = time.Time{} + c.state.CheckpointPath = "" + c.state.CheckpointLog = "" + c.state.RestoreLog = "" c.state.ExitCode = 0 c.state.Exited = false c.state.State = define.ContainerStateCreated @@ -1216,7 +1246,10 @@ func (c *Container) start() error { } } - if c.config.HealthCheckConfig != nil { + // Check if the healthcheck is not nil and the --no-healthcheck option is not set. + // If --no-healthcheck is set, Test will always be set to `[NONE]`, so there is + // no need to update the status in that case. + if c.config.HealthCheckConfig != nil && !(len(c.config.HealthCheckConfig.Test) == 1 && c.config.HealthCheckConfig.Test[0] == "NONE") { if err := c.updateHealthStatus(define.HealthCheckStarting); err != nil { logrus.Error(err) } @@ -1239,8 +1272,8 @@ func (c *Container) stop(timeout uint) error { // a pid namespace then the OCI Runtime needs to kill ALL processes in // the containers cgroup in order to make sure the container is stopped. all := !c.hasNamespace(spec.PIDNamespace) - // We can't use --all if CGroups aren't present. - // Rootless containers with CGroups v1 and NoCgroups are both cases + // We can't use --all if Cgroups aren't present. + // Rootless containers with Cgroups v1 and NoCgroups are both cases // where this can happen.
if all { if c.config.NoCgroups { @@ -1348,7 +1381,7 @@ func (c *Container) stop(timeout uint) error { // Internal, non-locking function to pause a container func (c *Container) pause() error { if c.config.NoCgroups { - return errors.Wrapf(define.ErrNoCgroups, "cannot pause without using CGroups") + return errors.Wrapf(define.ErrNoCgroups, "cannot pause without using Cgroups") } if rootless.IsRootless() { @@ -1376,7 +1409,7 @@ func (c *Container) pause() error { // Internal, non-locking function to unpause a container func (c *Container) unpause() error { if c.config.NoCgroups { - return errors.Wrapf(define.ErrNoCgroups, "cannot unpause without using CGroups") + return errors.Wrapf(define.ErrNoCgroups, "cannot unpause without using Cgroups") } if err := c.ociRuntime.UnpauseContainer(c); err != nil { @@ -1466,26 +1499,28 @@ func (c *Container) mountStorage() (_ string, deferredErr error) { return c.state.Mountpoint, nil } - mounted, err := mount.Mounted(c.config.ShmDir) - if err != nil { - return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir) - } - - if !mounted && !MountExists(c.config.Spec.Mounts, "/dev/shm") { - shmOptions := fmt.Sprintf("mode=1777,size=%d", c.config.ShmSize) - if err := c.mountSHM(shmOptions); err != nil { - return "", err - } - if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil { - return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir) + if !c.config.NoShm { + mounted, err := mount.Mounted(c.config.ShmDir) + if err != nil { + return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir) } - defer func() { - if deferredErr != nil { - if err := c.unmountSHM(c.config.ShmDir); err != nil { - logrus.Errorf("Unmounting SHM for container %s after mount error: %v", c.ID(), err) - } + + if !mounted && !MountExists(c.config.Spec.Mounts, "/dev/shm") { + shmOptions := fmt.Sprintf("mode=1777,size=%d", c.config.ShmSize) + if err := c.mountSHM(shmOptions); err != nil { + return "", err } - }() + if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil { + return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir) + } + defer func() { + if deferredErr != nil { + if err := c.unmountSHM(c.config.ShmDir); err != nil { + logrus.Errorf("Unmounting SHM for container %s after mount error: %v", c.ID(), err) + } + } + }() + } } // We need to mount the container before volumes - to ensure the copyup @@ -1493,8 +1528,8 @@ func (c *Container) mountStorage() (_ string, deferredErr error) { mountPoint := c.config.Rootfs // Check if overlay has to be created on top of Rootfs if c.config.RootfsOverlay { - overlayDest := c.runtime.store.GraphRoot() - contentDir, err := overlay.GenerateStructure(c.runtime.store.GraphRoot(), c.ID(), "rootfs", c.RootUID(), c.RootGID()) + overlayDest := c.runtime.RunRoot() + contentDir, err := overlay.GenerateStructure(overlayDest, c.ID(), "rootfs", c.RootUID(), c.RootGID()) if err != nil { return "", errors.Wrapf(err, "rootfs-overlay: failed to create TempDir in the %s directory", overlayDest) } @@ -1515,6 +1550,19 @@ func (c *Container) mountStorage() (_ string, deferredErr error) { } mountPoint = overlayMount.Source + execUser, err := lookup.GetUserGroupInfo(mountPoint, c.config.User, nil) + if err != nil { + return "", err + } + hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), uint32(execUser.Uid), uint32(execUser.Gid)) + if err != nil { + return "", 
errors.Wrap(err, "unable to get host UID and host GID") + } + + // note: this should not be recursive; if using an external rootfs, users are responsible for configuring ownership. + if err := chown.ChangeHostPathOwnership(mountPoint, false, int(hostUID), int(hostGID)); err != nil { + return "", err + } } if mountPoint == "" { @@ -1531,14 +1579,49 @@ func (c *Container) mountStorage() (_ string, deferredErr error) { }() } + rootUID, rootGID := c.RootUID(), c.RootGID() + + dirfd, err := unix.Open(mountPoint, unix.O_RDONLY|unix.O_PATH, 0) + if err != nil { + return "", errors.Wrap(err, "open mount point") + } + defer unix.Close(dirfd) + + err = unix.Mkdirat(dirfd, "etc", 0755) + if err != nil && !os.IsExist(err) { + return "", errors.Wrap(err, "create /etc") + } + // If the etc directory was created, chown it to root in the container + if err == nil && (rootUID != 0 || rootGID != 0) { + err = unix.Fchownat(dirfd, "etc", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + return "", errors.Wrap(err, "chown /etc") + } + } + + etcInTheContainerPath, err := securejoin.SecureJoin(mountPoint, "etc") + if err != nil { + return "", errors.Wrap(err, "resolve /etc in the container") + } + + etcInTheContainerFd, err := unix.Open(etcInTheContainerPath, unix.O_RDONLY|unix.O_PATH, 0) + if err != nil { + return "", errors.Wrap(err, "open /etc in the container") + } + defer unix.Close(etcInTheContainerFd) + // If /etc/mtab does not exist in container image, then we need to // create it, so that mount command within the container will work. - mtab := filepath.Join(mountPoint, "/etc/mtab") - if err := idtools.MkdirAllAs(filepath.Dir(mtab), 0755, c.RootUID(), c.RootGID()); err != nil { - return "", errors.Wrap(err, "error creating mtab directory") + err = unix.Symlinkat("/proc/mounts", etcInTheContainerFd, "mtab") + if err != nil && !os.IsExist(err) { + return "", errors.Wrap(err, "creating /etc/mtab symlink") } - if err = os.Symlink("/proc/mounts", mtab); err != nil && !os.IsExist(err) { - return "", err + // If the symlink was created, then also chown it to root in the container + if err == nil && (rootUID != 0 || rootGID != 0) { + err = unix.Fchownat(etcInTheContainerFd, "mtab", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + return "", errors.Wrap(err, "chown /etc/mtab") + } } // Request a mount of all named volumes @@ -1591,13 +1674,6 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) if vol.state.NeedsCopyUp { logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name()) - // Set NeedsCopyUp to false immediately, so we don't try this - // again when there are already files copied. - vol.state.NeedsCopyUp = false - if err := vol.save(); err != nil { - return nil, err - } - // If the volume is not empty, we should not copy up. volMount := vol.mountPoint() contents, err := ioutil.ReadDir(volMount) @@ -1644,6 +1720,13 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) return vol, nil } + // Set NeedsCopyUp to false since we are about to do the first copy-up. + // Do not copy a second time. + vol.state.NeedsCopyUp = false + if err := vol.save(); err != nil { + return nil, err + } + // Buildah Copier accepts a reader, so we'll need a pipe.
reader, writer := io.Pipe() defer reader.Close() @@ -1690,13 +1773,27 @@ func (c *Container) cleanupStorage() error { var cleanupErr error + markUnmounted := func() { + c.state.Mountpoint = "" + c.state.Mounted = false + + if c.valid { + if err := c.save(); err != nil { + if cleanupErr != nil { + logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr) + } + cleanupErr = err + } + } + } + // umount rootfs overlay if it was created if c.config.RootfsOverlay { - overlayBasePath := c.runtime.store.GraphRoot() - overlayBasePath = filepath.Join(overlayBasePath, "rootfs") + overlayBasePath := filepath.Dir(c.state.Mountpoint) if err := overlay.Unmount(overlayBasePath); err != nil { - // If the container can't remove content report the error - logrus.Errorf("Failed to cleanup overlay mounts for %s: %v", c.ID(), err) + if cleanupErr != nil { + logrus.Errorf("Failed to cleanup overlay mounts for %s: %v", c.ID(), err) + } cleanupErr = err } } @@ -1717,6 +1814,7 @@ func (c *Container) cleanupStorage() error { } if c.config.Rootfs != "" { + markUnmounted() return cleanupErr } @@ -1761,21 +1859,11 @@ func (c *Container) cleanupStorage() error { } } - c.state.Mountpoint = "" - c.state.Mounted = false - - if c.valid { - if err := c.save(); err != nil { - if cleanupErr != nil { - logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr) - } - cleanupErr = err - } - } + markUnmounted() return cleanupErr } -// Unmount the a container and free its resources +// Unmount the container and free its resources func (c *Container) cleanup(ctx context.Context) error { var lastError error @@ -1783,7 +1871,7 @@ func (c *Container) cleanup(ctx context.Context) error { // Remove healthcheck unit/timer file if it execs if c.config.HealthCheckConfig != nil { - if err := c.removeTimer(); err != nil { + if err := c.removeTransientFiles(ctx); err != nil { logrus.Errorf("Removing timer for container %s healthcheck: %v", c.ID(), err) } } @@ -1793,6 +1881,24 @@ func (c *Container) cleanup(ctx context.Context) error { lastError = errors.Wrapf(err, "error removing container %s network", c.ID()) } + // clean up the host entry if it is shared + if c.config.NetNsCtr != "" { + if hoststFile, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { + if _, err := os.Stat(hoststFile); err == nil { + // we cannot use the dependency container lock due to ABBA deadlocks + if lock, err := lockfile.GetLockfile(hoststFile); err == nil { + lock.Lock() + // make sure to ignore ENOENT error in case the netns container was cleaned up before this one + if err := etchosts.Remove(hoststFile, getLocalhostHostEntry(c)); err != nil && !errors.Is(err, os.ErrNotExist) { + // this error is not fatal; we still want to do proper cleanup + logrus.Errorf("failed to remove hosts entry from the netns containers /etc/hosts: %v", err) + } + lock.Unlock() + } + } + } + } + // Remove the container from the runtime, if necessary. // Do this *before* unmounting storage - some runtimes (e.g.
Kata) // apparently object to having storage removed while the container still @@ -1929,33 +2035,6 @@ func (c *Container) writeStringToStaticDir(filename, contents string) (string, e return destFileName, nil } -// appendStringToRunDir appends the provided string to the runtimedir file -func (c *Container) appendStringToRunDir(destFile, output string) (string, error) { - destFileName := filepath.Join(c.state.RunDir, destFile) - - f, err := os.OpenFile(destFileName, os.O_APPEND|os.O_RDWR, 0600) - if err != nil { - return "", err - } - defer f.Close() - - compareStr := strings.TrimRight(output, "\n") - scanner := bufio.NewScanner(f) - scanner.Split(bufio.ScanLines) - - for scanner.Scan() { - if strings.Compare(scanner.Text(), compareStr) == 0 { - return filepath.Join(c.state.RunDir, destFile), nil - } - } - - if _, err := f.WriteString(output); err != nil { - return "", errors.Wrapf(err, "unable to write %s", destFileName) - } - - return filepath.Join(c.state.RunDir, destFile), nil -} - // saveSpec saves the OCI spec to disk, replacing any existing specs for the container func (c *Container) saveSpec(spec *spec.Spec) error { // If the OCI spec already exists, we need to replace it @@ -2079,7 +2158,7 @@ func (c *Container) checkReadyForRemoval() error { return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID()) } - if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { + if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) && !c.IsInfra() { return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String()) } @@ -2104,6 +2183,24 @@ func (c *Container) canWithPrevious() error { // prepareCheckpointExport writes the config and spec to // JSON files for later export func (c *Container) prepareCheckpointExport() error { + networks, err := c.networks() + if err != nil { + return err + } + // make sure to exclude the short ID alias since the container gets a new ID on restore + for net, opts := range networks { + newAliases := make([]string, 0, len(opts.Aliases)) + for _, alias := range opts.Aliases { + if alias != c.config.ID[:12] { + newAliases = append(newAliases, alias) + } + } + opts.Aliases = newAliases + networks[net] = opts + } + + // add the networks from the db to the config so that the exported checkpoint still stores all current networks + c.config.Networks = networks // save live config if _, err := metadata.WriteJSONFile(c.config, c.bundlePath(), metadata.ConfigDumpFile); err != nil { return err @@ -2123,9 +2220,9 @@ func (c *Container) prepareCheckpointExport() error { return nil } -// sortUserVolumes sorts the volumes specified for a container +// SortUserVolumes sorts the volumes specified for a container // between named and normal volumes -func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) { +func (c *Container) SortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) { namedUserVolumes := []*ContainerNamedVolume{} userMounts := []spec.Mount{} diff --git a/vendor/github.com/containers/podman/v3/libpod/container_internal_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go similarity index 68% rename from vendor/github.com/containers/podman/v3/libpod/container_internal_linux.go rename to vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go index 867ecc2ada0..4742b22abcf 100644 
--- a/vendor/github.com/containers/podman/v3/libpod/container_internal_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go @@ -21,35 +21,41 @@ import ( "time" metadata "github.com/checkpoint-restore/checkpointctl/lib" - cdi "github.com/container-orchestrated-devices/container-device-interface/pkg" + "github.com/checkpoint-restore/go-criu/v5/stats" + cdi "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/buildah" "github.com/containers/buildah/pkg/chrootuser" "github.com/containers/buildah/pkg/overlay" butil "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/chown" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/subscriptions" "github.com/containers/common/pkg/umask" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/annotations" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/checkpoint/crutils" - "github.com/containers/podman/v3/pkg/criu" - "github.com/containers/podman/v3/pkg/lookup" - "github.com/containers/podman/v3/pkg/resolvconf" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" - "github.com/containers/podman/v3/utils" - "github.com/containers/podman/v3/version" + is "github.com/containers/image/v5/storage" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/annotations" + "github.com/containers/podman/v4/pkg/checkpoint/crutils" + "github.com/containers/podman/v4/pkg/criu" + "github.com/containers/podman/v4/pkg/lookup" + "github.com/containers/podman/v4/pkg/resolvconf" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v4/version" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/lockfile" securejoin "github.com/cyphar/filepath-securejoin" runcuser "github.com/opencontainers/runc/libcontainer/user" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" + "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -106,15 +112,6 @@ func (c *Container) prepare() error { c.state.NetNS = netNS c.state.NetworkStatus = networkStatus } - - // handle rootless network namespace setup - if noNetNS && !c.config.PostConfigureNetNS { - if rootless.IsRootless() { - createNetNSErr = c.runtime.setupRootlessNetNS(c) - } else if c.config.NetMode.IsSlirp4netns() { - createNetNSErr = c.runtime.setupSlirp4netns(c) - } - } }() // Mount storage if not mounted go func() { @@ -180,6 +177,57 @@ func (c *Container) prepare() error { return nil } +// isWorkDirSymlink returns true if resolved workdir is symlink or a chain of symlinks, +// and final resolved target is present either on volume, mount or inside of container +// otherwise it returns false. 
The following function is meant for internal use only and
+// can change at any time.
+func (c *Container) isWorkDirSymlink(resolvedPath string) bool {
+	// We cannot create the workdir since an explicit --workdir is
+	// set in the config, but the workdir could also be a symlink.
+	// If it is a symlink, check whether the resolved link is present
+	// on the container or not.
+
+	// If we can resolve the symlink and the resolved link is present on the
+	// container, then return true, since this is a valid use case.
+
+	maxSymLinks := 0
+	for {
+		// Linux only supports a chain of 40 links.
+		// Reference: https://github.com/torvalds/linux/blob/master/include/linux/namei.h#L13
+		if maxSymLinks > 40 {
+			break
+		}
+		resolvedSymlink, err := os.Readlink(resolvedPath)
+		if err != nil {
+			// End the symlink resolution loop.
+			break
+		}
+		if resolvedSymlink != "" {
+			_, resolvedSymlinkWorkdir, err := c.resolvePath(c.state.Mountpoint, resolvedSymlink)
+			if isPathOnVolume(c, resolvedSymlinkWorkdir) || isPathOnBindMount(c, resolvedSymlinkWorkdir) {
+				// The resolved symlink exists on an external volume or mount.
+				return true
+			}
+			if err != nil {
+				// Could not resolve the path, so end the symlink resolution loop.
+				break
+			}
+			if resolvedSymlinkWorkdir != "" {
+				resolvedPath = resolvedSymlinkWorkdir
+				_, err := os.Stat(resolvedSymlinkWorkdir)
+				if err == nil {
+					// The symlink resolved successfully and the resolved path exists
+					// on the container; this is a valid use case, so return true.
+					logrus.Debugf("Workdir is a symlink with target to %q and resolved symlink exists on container", resolvedSymlink)
+					return true
+				}
+			}
+		}
+		maxSymLinks++
+	}
+	return false
+}
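For orientation while reading this hunk: the loop above is the kernel's MAXSYMLINKS bound (40) applied in user space. A minimal, self-contained sketch of the same bounded-readlink pattern, assuming only the Go standard library — chaseLink is a hypothetical helper, not part of the vendored code:

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// chaseLink follows a chain of symlinks, giving up after 40 hops — the same
// limit Linux enforces (MAXSYMLINKS) — so cyclic links cannot loop forever.
func chaseLink(path string) (string, error) {
	for i := 0; i < 40; i++ {
		fi, err := os.Lstat(path)
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			// Not a symlink: fully resolved.
			return path, nil
		}
		target, err := os.Readlink(path)
		if err != nil {
			return "", err
		}
		if !filepath.IsAbs(target) {
			// Relative targets are interpreted relative to the link's directory.
			target = filepath.Join(filepath.Dir(path), target)
		}
		path = target
	}
	return "", errors.New("too many levels of symbolic links")
}

func main() {
	resolved, err := chaseLink(os.Args[0])
	fmt.Println(resolved, err)
}

Bounding the loop is what turns a cyclic link (a -> b -> a) from an infinite loop into a clean error.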
+
// resolveWorkDir resolves the container's workdir and, depending on the
// configuration, will create it, or error out if it does not exist.
// Note that the container must be mounted before.
@@ -212,6 +260,11 @@ func (c *Container) resolveWorkDir() error {
	// the path exists on the container.
	if err != nil {
		if os.IsNotExist(err) {
+			// If the resolved workdir path turns out to be a valid symlink,
+			// return nil, because this is a valid use case.
+			if c.isWorkDirSymlink(resolvedWorkdir) {
+				return nil
+			}
			return errors.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
		}
		// This might be a serious error (e.g., permission), so
@@ -312,16 +365,46 @@ func (c *Container) getUserOverrides() *lookup.Overrides {
	return &overrides
}

+func lookupHostUser(name string) (*runcuser.ExecUser, error) {
+	var execUser runcuser.ExecUser
+	// Look up the user on the host.
+	u, err := util.LookupUser(name)
+	if err != nil {
+		return &execUser, err
+	}
+	// UIDs and GIDs in /etc/passwd are decimal, so parse them as base 10.
+	uid, err := strconv.ParseUint(u.Uid, 10, 32)
+	if err != nil {
+		return &execUser, err
+	}
+
+	gid, err := strconv.ParseUint(u.Gid, 10, 32)
+	if err != nil {
+		return &execUser, err
+	}
+	execUser.Uid = int(uid)
+	execUser.Gid = int(gid)
+	execUser.Home = u.HomeDir
+	return &execUser, nil
+}
+
// Generate spec for a container
// Accepts a map of the container's dependencies
func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
	overrides := c.getUserOverrides()
	execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides)
	if err != nil {
-		return nil, err
+		if util.StringInSlice(c.config.User, c.config.HostUsers) {
+			execUser, err = lookupHostUser(c.config.User)
+		}
+		if err != nil {
+			return nil, err
+		}
	}
-	g := generate.Generator{Config: c.config.Spec}
+	// NewFromSpec() is deprecated according to its comment,
+	// however the recommended replacement just causes a nil map panic.
+	//nolint:staticcheck
+	g := generate.NewFromSpec(c.config.Spec)

	// If network namespace was requested, add it now
	if c.config.CreateNetNS {
@@ -371,30 +454,63 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
		}

		overlayFlag := false
+		upperDir := ""
+		workDir := ""
		for _, o := range namedVol.Options {
			if o == "O" {
				overlayFlag = true
			}
+			if overlayFlag && strings.Contains(o, "upperdir") {
+				splitOpt := strings.SplitN(o, "=", 2)
+				if len(splitOpt) > 1 {
+					upperDir = splitOpt[1]
+					if upperDir == "" {
+						return nil, errors.New("cannot accept empty value for upperdir")
+					}
+				}
+			}
+			if overlayFlag && strings.Contains(o, "workdir") {
+				splitOpt := strings.SplitN(o, "=", 2)
+				if len(splitOpt) > 1 {
+					workDir = splitOpt[1]
+					if workDir == "" {
+						return nil, errors.New("cannot accept empty value for workdir")
+					}
+				}
+			}
		}

		if overlayFlag {
+			var overlayMount spec.Mount
+			var overlayOpts *overlay.Options
			contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
			if err != nil {
				return nil, err
			}
-			overlayMount, err := overlay.Mount(contentDir, mountPoint, namedVol.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
+
+			if (upperDir != "" && workDir == "") || (upperDir == "" && workDir != "") {
+				// err is nil at this point, so a fresh error is returned here.
+				return nil, errors.New("must specify both upperdir and workdir")
+			}
+
+			overlayOpts = &overlay.Options{RootUID: c.RootUID(),
+				RootGID:                c.RootGID(),
+				UpperDirOptionFragment: upperDir,
+				WorkDirOptionFragment:  workDir,
+				GraphOpts:              c.runtime.store.GraphOptions(),
+			}
+
+			overlayMount, err = overlay.MountWithOptions(contentDir, mountPoint, namedVol.Dest, overlayOpts)
			if err != nil {
				return nil, errors.Wrapf(err, "mounting overlay failed %q", mountPoint)
			}

			for _, o := range namedVol.Options {
-				switch o {
-				case "U":
-					if err := chown.ChangeHostPathOwnership(mountPoint, true, int(hostUID), int(hostGID)); err != nil {
+				if o == "U" {
+					if err := c.ChangeHostPathOwnership(mountPoint, true, int(hostUID), int(hostGID)); err != nil {
						return nil, err
					}
-					if err := chown.ChangeHostPathOwnership(contentDir, true, int(hostUID),
int(hostGID)); err != nil { + if err := c.ChangeHostPathOwnership(contentDir, true, int(hostUID), int(hostGID)); err != nil { return nil, err } } @@ -423,14 +539,15 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if m.Type == "tmpfs" { options = append(options, []string{fmt.Sprintf("uid=%d", execUser.Uid), fmt.Sprintf("gid=%d", execUser.Gid)}...) } else { - if err := chown.ChangeHostPathOwnership(m.Source, true, int(hostUID), int(hostGID)); err != nil { + // only chown on initial creation of container + if err := c.ChangeHostPathOwnership(m.Source, true, int(hostUID), int(hostGID)); err != nil { return nil, err } } case "z": fallthrough case "Z": - if err := label.Relabel(m.Source, c.MountLabel(), label.IsShared(o)); err != nil { + if err := c.relabel(m.Source, c.MountLabel(), label.IsShared(o)); err != nil { return nil, err } @@ -455,6 +572,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { if c.IsReadOnly() && dstPath != "/dev/shm" { newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev") } + if dstPath == "/dev/shm" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { + newMount.Options = append(newMount.Options, "nosuid", "noexec", "nodev") + } if !MountExists(g.Mounts(), dstPath) { g.AddMount(newMount) } else { @@ -475,13 +595,12 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // Check overlay volume options for _, o := range overlayVol.Options { - switch o { - case "U": - if err := chown.ChangeHostPathOwnership(overlayVol.Source, true, int(hostUID), int(hostGID)); err != nil { + if o == "U" { + if err := c.ChangeHostPathOwnership(overlayVol.Source, true, int(hostUID), int(hostGID)); err != nil { return nil, err } - if err := chown.ChangeHostPathOwnership(contentDir, true, int(hostUID), int(hostGID)); err != nil { + if err := c.ChangeHostPathOwnership(contentDir, true, int(hostUID), int(hostGID)); err != nil { return nil, err } } @@ -556,7 +675,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { } } - if c.config.Systemd { + if c.Systemd() { if err := c.setupSystemd(g.Mounts(), g); err != nil { return nil, errors.Wrapf(err, "error adding systemd-specific mounts") } @@ -715,18 +834,6 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { g.AddAnnotation(annotations.ContainerManager, annotations.ContainerManagerLibpod) } - // Only add container environment variable if not already present - foundContainerEnv := false - for _, env := range g.Config.Process.Env { - if strings.HasPrefix(env, "container=") { - foundContainerEnv = true - break - } - } - if !foundContainerEnv { - g.AddProcessEnv("container", "libpod") - } - cgroupPath, err := c.getOCICgroupPath() if err != nil { return nil, err @@ -735,7 +842,12 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { // Warning: CDI may alter g.Config in place. if len(c.config.CDIDevices) > 0 { - if err = cdi.UpdateOCISpecForDevices(g.Config, c.config.CDIDevices); err != nil { + registry := cdi.GetRegistry() + if errs := registry.GetErrors(); len(errs) > 0 { + logrus.Debugf("The following errors were triggered when creating the CDI registry: %v", errs) + } + _, err := registry.InjectDevices(g.Config, c.config.CDIDevices...) 
+	if err != nil {
		return nil, errors.Wrapf(err, "error setting up CDI devices")
	}
}
@@ -861,6 +973,16 @@ func (c *Container) mountNotifySocket(g generate.Generator) error {
// systemd expects to have /run, /run/lock and /tmp on tmpfs
// It also expects to be able to write to /sys/fs/cgroup/systemd and /var/log/journal
func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) error {
+	var containerUUIDSet bool
+	for _, s := range c.config.Spec.Process.Env {
+		if strings.HasPrefix(s, "container_uuid=") {
+			containerUUIDSet = true
+			break
+		}
+	}
+	if !containerUUIDSet {
+		g.AddProcessEnv("container_uuid", c.ID()[:32])
+	}
	options := []string{"rw", "rprivate", "nosuid", "nodev"}
	for _, dest := range []string{"/run", "/run/lock"} {
		if MountExists(mounts, dest) {
@@ -981,6 +1103,130 @@ func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr
	return nil
}

+func (c *Container) addCheckpointImageMetadata(importBuilder *buildah.Builder) error {
+	// Get information about the host environment
+	hostInfo, err := c.Runtime().hostInfo()
+	if err != nil {
+		return fmt.Errorf("getting host info: %v", err)
+	}
+
+	criuVersion, err := criu.GetCriuVersion()
+	if err != nil {
+		return fmt.Errorf("getting criu version: %v", err)
+	}
+
+	rootfsImageID, rootfsImageName := c.Image()
+
+	// Add image annotations with information about the container and the host.
+	// This information is useful to check compatibility before restoring the checkpoint.
+
+	checkpointImageAnnotations := map[string]string{
+		define.CheckpointAnnotationName:                c.config.Name,
+		define.CheckpointAnnotationRawImageName:        c.config.RawImageName,
+		define.CheckpointAnnotationRootfsImageID:       rootfsImageID,
+		define.CheckpointAnnotationRootfsImageName:     rootfsImageName,
+		define.CheckpointAnnotationPodmanVersion:       version.Version.String(),
+		define.CheckpointAnnotationCriuVersion:         strconv.Itoa(criuVersion),
+		define.CheckpointAnnotationRuntimeName:         hostInfo.OCIRuntime.Name,
+		define.CheckpointAnnotationRuntimeVersion:      hostInfo.OCIRuntime.Version,
+		define.CheckpointAnnotationConmonVersion:       hostInfo.Conmon.Version,
+		define.CheckpointAnnotationHostArch:            hostInfo.Arch,
+		define.CheckpointAnnotationHostKernel:          hostInfo.Kernel,
+		define.CheckpointAnnotationCgroupVersion:       hostInfo.CgroupsVersion,
+		define.CheckpointAnnotationDistributionVersion: hostInfo.Distribution.Version,
+		define.CheckpointAnnotationDistributionName:    hostInfo.Distribution.Distribution,
+	}

+	for key, value := range checkpointImageAnnotations {
+		importBuilder.SetAnnotation(key, value)
+	}
+
+	return nil
+}
+
+func (c *Container) resolveCheckpointImageName(options *ContainerCheckpointOptions) error {
+	if options.CreateImage == "" {
+		return nil
+	}
+
+	// Resolve the image name
+	resolvedImageName, err := c.runtime.LibimageRuntime().ResolveName(options.CreateImage)
+	if err != nil {
+		return err
+	}
+
+	options.CreateImage = resolvedImageName
+	return nil
+}
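The annotations set above let a restore pre-check compatibility before CRIU is even invoked. A small sketch of such a check, assuming the annotation map has already been read from the checkpoint image's manifest; checkCheckpointCompat is hypothetical, and only the define constant comes from the vendored code:

package main

import (
	"fmt"
	"runtime"

	"github.com/containers/podman/v4/libpod/define"
)

// checkCheckpointCompat compares the architecture recorded on a checkpoint
// image against the local host. A real check could compare kernel, cgroup
// version, and runtime versions the same way.
func checkCheckpointCompat(annotations map[string]string) error {
	if arch, ok := annotations[define.CheckpointAnnotationHostArch]; ok && arch != runtime.GOARCH {
		return fmt.Errorf("checkpoint was created on %q, this host is %q", arch, runtime.GOARCH)
	}
	return nil
}

func main() {
	fmt.Println(checkCheckpointCompat(map[string]string{
		define.CheckpointAnnotationHostArch: "amd64",
	}))
}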
+
+func (c *Container) createCheckpointImage(ctx context.Context, options ContainerCheckpointOptions) error {
+	if options.CreateImage == "" {
+		return nil
+	}
+	logrus.Debugf("Create checkpoint image %s", options.CreateImage)
+
+	// Create the storage reference
+	imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, options.CreateImage)
+	if err != nil {
+		return errors.Wrapf(err, "failed to parse image name")
+	}
+
+	// Build an image from scratch
+	builderOptions := buildah.BuilderOptions{
+		FromImage: "scratch",
+	}
+	importBuilder, err := buildah.NewBuilder(ctx, c.runtime.store, builderOptions)
+	if err != nil {
+		return err
+	}
+	// Clean up the buildah working container
+	defer func() {
+		if err := importBuilder.Delete(); err != nil {
+			logrus.Errorf("Image builder delete failed: %v", err)
+		}
+	}()
+
+	if err := c.prepareCheckpointExport(); err != nil {
+		return err
+	}
+
+	// Export the checkpoint into a temporary tar file
+	tmpDir, err := ioutil.TempDir("", "checkpoint_image_")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpDir)
+
+	options.TargetFile = path.Join(tmpDir, "checkpoint.tar")
+
+	if err := c.exportCheckpoint(options); err != nil {
+		return err
+	}
+
+	// Copy the checkpoint from the temporary tar file into the image
+	addAndCopyOptions := buildah.AddAndCopyOptions{}
+	if err := importBuilder.Add("", true, addAndCopyOptions, options.TargetFile); err != nil {
+		return err
+	}
+
+	if err := c.addCheckpointImageMetadata(importBuilder); err != nil {
+		return err
+	}
+
+	commitOptions := buildah.CommitOptions{
+		Squash:        true,
+		SystemContext: c.runtime.imageContext,
+	}
+
+	// Create the checkpoint image
+	id, _, _, err := importBuilder.Commit(ctx, imageRef, commitOptions)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Created checkpoint image: %s", id)
+	return nil
+}
+
func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
	if len(c.Dependencies()) == 1 {
		// Check if the dependency is an infra container. If it is we can checkpoint
@@ -1008,12 +1254,17 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
	includeFiles := []string{
		"artifacts",
-		"ctr.log",
+		metadata.DevShmCheckpointTar,
		metadata.ConfigDumpFile,
		metadata.SpecDumpFile,
		metadata.NetworkStatusFile,
+		stats.StatsDump,
	}
+	if c.LogDriver() == define.KubernetesLogging ||
+		c.LogDriver() == define.JSONLogging {
+		includeFiles = append(includeFiles, "ctr.log")
+	}
	if options.PreCheckPoint {
		includeFiles = append(includeFiles, preCheckpointDir)
	} else {
@@ -1037,7 +1288,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
	}

	// Folder containing archived volumes that will be included in the export
-	expVolDir := filepath.Join(c.bundlePath(), "volumes")
+	expVolDir := filepath.Join(c.bundlePath(), metadata.CheckpointVolumesDirectory)

	// Create an archive for each volume associated with the container
	if !options.IgnoreVolumes {
@@ -1046,7 +1297,7 @@
		}

		for _, v := range c.config.NamedVolumes {
-			volumeTarFilePath := filepath.Join("volumes", v.Name+".tar")
+			volumeTarFilePath := filepath.Join(metadata.CheckpointVolumesDirectory, v.Name+".tar")
			volumeTarFileFullPath := filepath.Join(c.bundlePath(), volumeTarFilePath)

			volumeTarFile, err := os.Create(volumeTarFileFullPath)
@@ -1131,25 +1382,57 @@ func (c *Container) checkpointRestoreSupported(version int) error {
	return nil
}

-func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) error {
+func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointOptions) (*define.CRIUCheckpointRestoreStatistics, int64, error) {
	if err := c.checkpointRestoreSupported(criu.MinCriuVersion); err != nil {
-		return err
+		return nil, 0, err
	}

	if c.state.State != define.ContainerStateRunning {
-		return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
+		return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
	}

	if c.AutoRemove() && options.TargetFile == "" {
-		return
errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used") + return nil, 0, errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used") + } + + if err := c.resolveCheckpointImageName(&options); err != nil { + return nil, 0, err } if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "dump.log", c.MountLabel()); err != nil { - return err + return nil, 0, err } - if err := c.ociRuntime.CheckpointContainer(c, options); err != nil { - return err + // Setting CheckpointLog early in case there is a failure. + c.state.CheckpointLog = path.Join(c.bundlePath(), "dump.log") + c.state.CheckpointPath = c.CheckpointPath() + + runtimeCheckpointDuration, err := c.ociRuntime.CheckpointContainer(c, options) + if err != nil { + return nil, 0, err + } + + // Keep the content of /dev/shm directory + if c.config.ShmDir != "" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { + shmDirTarFileFullPath := filepath.Join(c.bundlePath(), metadata.DevShmCheckpointTar) + + shmDirTarFile, err := os.Create(shmDirTarFileFullPath) + if err != nil { + return nil, 0, err + } + defer shmDirTarFile.Close() + + input, err := archive.TarWithOptions(c.config.ShmDir, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeSourceDir: true, + }) + if err != nil { + return nil, 0, err + } + + if _, err = io.Copy(shmDirTarFile, input); err != nil { + return nil, 0, err + } } // Save network.status. This is needed to restore the container with @@ -1157,7 +1440,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO // with one interface. // FIXME: will this break something? if _, err := metadata.WriteJSONFile(c.getNetworkStatus(), c.bundlePath(), metadata.NetworkStatusFile); err != nil { - return err + return nil, 0, err } defer c.newContainerEvent(events.Checkpoint) @@ -1167,13 +1450,17 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO if options.WithPrevious { os.Remove(path.Join(c.CheckpointPath(), "parent")) if err := os.Symlink("../pre-checkpoint", path.Join(c.CheckpointPath(), "parent")); err != nil { - return err + return nil, 0, err } } if options.TargetFile != "" { if err := c.exportCheckpoint(options); err != nil { - return err + return nil, 0, err + } + } else { + if err := c.createCheckpointImage(ctx, options); err != nil { + return nil, 0, err } } @@ -1182,17 +1469,47 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO if !options.KeepRunning && !options.PreCheckPoint { c.state.State = define.ContainerStateStopped c.state.Checkpointed = true + c.state.CheckpointedTime = time.Now() + c.state.Restored = false + c.state.RestoredTime = time.Time{} // Cleanup Storage and Network if err := c.cleanup(ctx); err != nil { - return err + return nil, 0, err + } + } + + criuStatistics, err := func() (*define.CRIUCheckpointRestoreStatistics, error) { + if !options.PrintStats { + return nil, nil + } + statsDirectory, err := os.Open(c.bundlePath()) + if err != nil { + return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath()) + } + + dumpStatistics, err := stats.CriuGetDumpStats(statsDirectory) + if err != nil { + return nil, errors.Wrap(err, "Displaying checkpointing statistics not possible") } + + return &define.CRIUCheckpointRestoreStatistics{ + FreezingTime: dumpStatistics.GetFreezingTime(), + FrozenTime: dumpStatistics.GetFrozenTime(), + MemdumpTime: dumpStatistics.GetMemdumpTime(), + MemwriteTime: 
dumpStatistics.GetMemwriteTime(),
+			PagesScanned:  dumpStatistics.GetPagesScanned(),
+			PagesWritten:  dumpStatistics.GetPagesWritten(),
+		}, nil
+	}()
+	if err != nil {
+		return nil, 0, err
	}

	if !options.Keep && !options.PreCheckPoint {
		cleanup := []string{
			"dump.log",
-			"stats-dump",
+			stats.StatsDump,
			metadata.ConfigDumpFile,
			metadata.SpecDumpFile,
		}
@@ -1202,19 +1519,22 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
				logrus.Debugf("Unable to remove file %s", file)
			}
		}
+		// The file has been deleted. Do not mention it.
+		c.state.CheckpointLog = ""
	}

	c.state.FinishedTime = time.Now()
-	return c.save()
+	return criuStatistics, runtimeCheckpointDuration, c.save()
}

-func (c *Container) importCheckpoint(input string) error {
-	if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil {
-		return err
-	}
-
+func (c *Container) generateContainerSpec() error {
	// Make sure the newly created config.json exists on disk
-	g := generate.Generator{Config: c.config.Spec}
+
+	// NewFromSpec() is deprecated according to its comment,
+	// however the recommended replacement just causes a nil map panic.
+	//nolint:staticcheck
+	g := generate.NewFromSpec(c.config.Spec)
+
	if err := c.saveSpec(g.Config); err != nil {
		return errors.Wrap(err, "saving imported container specification for restore failed")
	}
@@ -1222,6 +1542,55 @@ func (c *Container) importCheckpoint(input string) error {
	return nil
}

+func (c *Container) importCheckpointImage(ctx context.Context, imageID string) error {
+	img, _, err := c.Runtime().LibimageRuntime().LookupImage(imageID, nil)
+	if err != nil {
+		return err
+	}
+
+	mountPoint, err := img.Mount(ctx, nil, "")
+	defer func() {
+		if err := c.unmount(true); err != nil {
+			logrus.Errorf("Failed to unmount container: %v", err)
+		}
+	}()
+	if err != nil {
+		return err
+	}
+
+	// Import all checkpoint files except ConfigDumpFile and SpecDumpFile. We
+	// generate new container config files to enable specifying a new
+	// container name.
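The checkpoint slice that follows enumerates everything copied out of the mounted image. As a minimal sketch of the same copy-if-present pattern — the copyIfPresent helper and the paths are made up for illustration, while archive.NewDefaultArchiver().CopyWithTar is the call used below:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/archive"
)

// copyIfPresent copies one checkpoint artifact from a mounted image into the
// bundle directory, treating a missing source as non-fatal, mirroring the
// loop that follows this sketch.
func copyIfPresent(mountPoint, bundle, name string) {
	src := filepath.Join(mountPoint, name)
	if _, err := os.Stat(src); err != nil {
		fmt.Printf("skipping %q: %v\n", name, err)
		return
	}
	dst := filepath.Join(bundle, name)
	if err := archive.NewDefaultArchiver().CopyWithTar(src, dst); err != nil {
		fmt.Printf("copy of %q failed: %v\n", name, err)
	}
}

func main() {
	copyIfPresent("/tmp/ckpt-image-mount", "/tmp/bundle", "artifacts")
}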
+ checkpoint := []string{ + "artifacts", + metadata.CheckpointDirectory, + metadata.CheckpointVolumesDirectory, + metadata.DevShmCheckpointTar, + metadata.RootFsDiffTar, + metadata.DeletedFilesFile, + metadata.PodOptionsFile, + metadata.PodDumpFile, + } + + for _, name := range checkpoint { + src := filepath.Join(mountPoint, name) + dst := filepath.Join(c.bundlePath(), name) + if err := archive.NewDefaultArchiver().CopyWithTar(src, dst); err != nil { + logrus.Debugf("Can't import '%s' from checkpoint image", name) + } + } + + return c.generateContainerSpec() +} + +func (c *Container) importCheckpointTar(input string) error { + if err := crutils.CRImportCheckpointWithoutConfig(c.bundlePath(), input); err != nil { + return err + } + + return c.generateContainerSpec() +} + func (c *Container) importPreCheckpoint(input string) error { archiveFile, err := os.Open(input) if err != nil { @@ -1237,7 +1606,7 @@ func (c *Container) importPreCheckpoint(input string) error { return nil } -func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (retErr error) { +func (c *Container) restore(ctx context.Context, options ContainerCheckpointOptions) (criuStatistics *define.CRIUCheckpointRestoreStatistics, runtimeRestoreDuration int64, retErr error) { minCriuVersion := func() int { if options.Pod == "" { return criu.MinCriuVersion @@ -1245,55 +1614,46 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti return criu.PodCriuVersion }() if err := c.checkpointRestoreSupported(minCriuVersion); err != nil { - return err + return nil, 0, err } if options.Pod != "" && !crutils.CRRuntimeSupportsPodCheckpointRestore(c.ociRuntime.Path()) { - return errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path()) + return nil, 0, errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path()) } if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) { - return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID()) + return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID()) } if options.ImportPrevious != "" { if err := c.importPreCheckpoint(options.ImportPrevious); err != nil { - return err + return nil, 0, err } } if options.TargetFile != "" { - if err := c.importCheckpoint(options.TargetFile); err != nil { - return err + if err := c.importCheckpointTar(options.TargetFile); err != nil { + return nil, 0, err + } + } else if options.CheckpointImageID != "" { + if err := c.importCheckpointImage(ctx, options.CheckpointImageID); err != nil { + return nil, 0, err } } // Let's try to stat() CRIU's inventory file. If it does not exist, it makes // no sense to try a restore. This is a minimal check if a checkpoint exist. if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) { - return errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore") + return nil, 0, errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore") } if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil { - return err - } - - // If a container is restored multiple times from an exported checkpoint with - // the help of '--import --name', the restore will fail if during 'podman run' - // a static container IP was set with '--ip'. 
The user can tell the restore
-	// process to ignore the static IP with '--ignore-static-ip'
-	if options.IgnoreStaticIP {
-		c.config.StaticIP = nil
+		return nil, 0, err
	}

-	// If a container is restored multiple times from an exported checkpoint with
-	// the help of '--import --name', the restore will fail if during 'podman run'
-	// a static container MAC address was set with '--mac-address'. The user
-	// can tell the restore process to ignore the static MAC with
-	// '--ignore-static-mac'
-	if options.IgnoreStaticMAC {
-		c.config.StaticMAC = nil
-	}
+	// Setting RestoreLog early in case there is a failure.
+	c.state.RestoreLog = path.Join(c.bundlePath(), "restore.log")
+	c.state.CheckpointPath = c.CheckpointPath()

	// Read network configuration from checkpoint
	var netStatus map[string]types.StatusBlock
@@ -1310,37 +1670,30 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
	if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
		// The file with the network.status does exist. Let's restore the
		// container with the same networks settings as during checkpointing.
-		aliases, err := c.GetAllNetworkAliases()
+		networkOpts, err := c.networks()
		if err != nil {
-			return err
+			return nil, 0, err
		}
+
		netOpts := make(map[string]types.PerNetworkOptions, len(netStatus))
-		for network, status := range netStatus {
-			perNetOpts := types.PerNetworkOptions{}
-			for name, netInt := range status.Interfaces {
-				perNetOpts = types.PerNetworkOptions{
-					InterfaceName: name,
-					Aliases:       aliases[network],
-				}
-				if !options.IgnoreStaticMAC {
+		for network, perNetOpts := range networkOpts {
+			// unset the MAC and IPs before we start adding the ones from the status
+			perNetOpts.StaticMAC = nil
+			perNetOpts.StaticIPs = nil
+			for name, netInt := range netStatus[network].Interfaces {
+				perNetOpts.InterfaceName = name
+				if !options.IgnoreStaticMAC {
					perNetOpts.StaticMAC = netInt.MacAddress
				}
				if !options.IgnoreStaticIP {
-					for _, netAddress := range netInt.Networks {
-						perNetOpts.StaticIPs = append(perNetOpts.StaticIPs, netAddress.Subnet.IP)
+					for _, netAddress := range netInt.Subnets {
+						perNetOpts.StaticIPs = append(perNetOpts.StaticIPs, netAddress.IPNet.IP)
					}
				}
				// Normally interfaces have a length of 1, only for some special cni configs we could get more.
				// For now just use the first interface to get the IPs; this should be good enough for most cases.
				break
			}
-			if perNetOpts.InterfaceName == "" {
-				eth, exists := c.state.NetInterfaceDescriptions.getInterfaceByName(network)
-				if !exists {
-					return errors.Errorf("no network interface name for container %s on network %s", c.config.ID, network)
-				}
-				perNetOpts.InterfaceName = eth
-			}
			netOpts[network] = perNetOpts
		}
		c.perNetworkOpts = netOpts
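To make the rebuilt per-network options concrete: a compact, self-contained sketch of the reconstruction above, using only types referenced in this hunk (staticOptsFromStatus is a hypothetical helper, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/containers/common/libnetwork/types"
)

// staticOptsFromStatus rebuilds PerNetworkOptions for one network from a
// recorded status block, the same shape as the loop above: take the first
// interface, pin its MAC, and pin every recorded subnet IP.
func staticOptsFromStatus(status types.StatusBlock) types.PerNetworkOptions {
	opts := types.PerNetworkOptions{}
	for name, iface := range status.Interfaces {
		opts.InterfaceName = name
		opts.StaticMAC = iface.MacAddress
		for _, subnet := range iface.Subnets {
			opts.StaticIPs = append(opts.StaticIPs, subnet.IPNet.IP)
		}
		break // interfaces almost always have length 1
	}
	return opts
}

func main() {
	fmt.Printf("%+v\n", staticOptsFromStatus(types.StatusBlock{}))
}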
@@ -1355,7 +1708,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
	}()

	if err := c.prepare(); err != nil {
-		return err
+		return nil, 0, err
	}

	// Read config
@@ -1364,11 +1717,11 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
	g, err := generate.NewFromFile(jsonPath)
	if err != nil {
		logrus.Debugf("generate.NewFromFile failed with %v", err)
-		return err
+		return nil, 0, err
	}

	// Restoring from an import means that we are doing migration
-	if options.TargetFile != "" {
+	if options.TargetFile != "" || options.CheckpointImageID != "" {
		g.SetRootPath(c.state.Mountpoint)
	}
@@ -1380,7 +1733,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
		}

		if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), netNSPath); err != nil {
-			return err
+			return nil, 0, err
		}
	}
@@ -1389,23 +1742,23 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
		// the ones from the infrastructure container.
		pod, err := c.runtime.LookupPod(options.Pod)
		if err != nil {
-			return errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
+			return nil, 0, errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
		}

		infraContainer, err := pod.InfraContainer()
		if err != nil {
-			return errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
+			return nil, 0, errors.Wrapf(err, "cannot retrieve infra container from pod %q", options.Pod)
		}

		infraContainer.lock.Lock()
		if err := infraContainer.syncContainer(); err != nil {
			infraContainer.lock.Unlock()
-			return errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
+			return nil, 0, errors.Wrapf(err, "error syncing status of infrastructure container %s", infraContainer.ID())
		}
		if infraContainer.state.State != define.ContainerStateRunning {
			if err := infraContainer.initAndStart(ctx); err != nil {
				infraContainer.lock.Unlock()
-				return errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
+				return nil, 0, errors.Wrapf(err, "error starting infrastructure container %s", infraContainer.ID())
			}
		}
		infraContainer.lock.Unlock()
@@ -1413,59 +1766,59 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
		if c.config.IPCNsCtr != "" {
			nsPath, err := infraContainer.namespacePath(IPCNS)
			if err != nil {
-				return errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
+				return nil, 0, errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
			}
			if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), nsPath); err != nil {
-				return err
+				return nil, 0, err
			}
		}

		if c.config.NetNsCtr != "" {
			nsPath, err := infraContainer.namespacePath(NetNS)
			if err != nil {
-				return errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
+				return nil, 0, errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
			}
			if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), nsPath); err != nil {
-				return err
+				return nil, 0, err
			}
		}

		if c.config.PIDNsCtr != "" {
			nsPath, err :=
infraContainer.namespacePath(PIDNS) if err != nil { - return errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod) + return nil, 0, errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod) } if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), nsPath); err != nil { - return err + return nil, 0, err } } if c.config.UTSNsCtr != "" { nsPath, err := infraContainer.namespacePath(UTSNS) if err != nil { - return errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod) + return nil, 0, errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod) } if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), nsPath); err != nil { - return err + return nil, 0, err } } if c.config.CgroupNsCtr != "" { nsPath, err := infraContainer.namespacePath(CgroupNS) if err != nil { - return errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod) + return nil, 0, errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod) } if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), nsPath); err != nil { - return err + return nil, 0, err } } } if err := c.makeBindMounts(); err != nil { - return err + return nil, 0, err } - if options.TargetFile != "" { + if options.TargetFile != "" || options.CheckpointImageID != "" { for dstPath, srcPath := range c.state.BindMounts { newMount := spec.Mount{ Type: "bind", @@ -1476,48 +1829,69 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if c.IsReadOnly() && dstPath != "/dev/shm" { newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev") } + if dstPath == "/dev/shm" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { + newMount.Options = append(newMount.Options, "nosuid", "noexec", "nodev") + } if !MountExists(g.Mounts(), dstPath) { g.AddMount(newMount) } } } + // Restore /dev/shm content + if c.config.ShmDir != "" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { + shmDirTarFileFullPath := filepath.Join(c.bundlePath(), metadata.DevShmCheckpointTar) + if _, err := os.Stat(shmDirTarFileFullPath); err != nil { + logrus.Debug("Container checkpoint doesn't contain dev/shm: ", err.Error()) + } else { + shmDirTarFile, err := os.Open(shmDirTarFileFullPath) + if err != nil { + return nil, 0, err + } + defer shmDirTarFile.Close() + + if err := archive.UntarUncompressed(shmDirTarFile, c.config.ShmDir, nil); err != nil { + return nil, 0, err + } + } + } + // Cleanup for a working restore. if err := c.removeConmonFiles(); err != nil { - return err + return nil, 0, err } // Save the OCI spec to disk if err := c.saveSpec(g.Config); err != nil { - return err + return nil, 0, err } // When restoring from an imported archive, allow restoring the content of volumes. 
// Volumes are created in setupContainer()
-	if options.TargetFile != "" && !options.IgnoreVolumes {
+	if !options.IgnoreVolumes && (options.TargetFile != "" || options.CheckpointImageID != "") {
		for _, v := range c.config.NamedVolumes {
-			volumeFilePath := filepath.Join(c.bundlePath(), "volumes", v.Name+".tar")
+			volumeFilePath := filepath.Join(c.bundlePath(), metadata.CheckpointVolumesDirectory, v.Name+".tar")

			volumeFile, err := os.Open(volumeFilePath)
			if err != nil {
-				return errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
+				return nil, 0, errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
			}
			defer volumeFile.Close()

			volume, err := c.runtime.GetVolume(v.Name)
			if err != nil {
-				return errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
+				return nil, 0, errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
			}

			mountPoint, err := volume.MountPoint()
			if err != nil {
-				return err
+				return nil, 0, err
			}
			if mountPoint == "" {
-				return errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
+				// err is nil here, so a fresh error is returned.
+				return nil, 0, errors.Errorf("unable to import volume %s as it is not mounted", volume.Name())
			}
			if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
-				return errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
+				return nil, 0, errors.Wrapf(err, "failed to extract volume %s to %s", volumeFilePath, mountPoint)
			}
		}
	}
@@ -1525,21 +1899,52 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
	// Before actually restarting the container, apply the root file-system changes
	if !options.IgnoreRootfs {
		if err := crutils.CRApplyRootFsDiffTar(c.bundlePath(), c.state.Mountpoint); err != nil {
-			return err
+			return nil, 0, err
		}

		if err := crutils.CRRemoveDeletedFiles(c.ID(), c.bundlePath(), c.state.Mountpoint); err != nil {
-			return err
+			return nil, 0, err
		}
	}

-	if err := c.ociRuntime.CreateContainer(c, &options); err != nil {
-		return err
+	runtimeRestoreDuration, err = c.ociRuntime.CreateContainer(c, &options)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	criuStatistics, err = func() (*define.CRIUCheckpointRestoreStatistics, error) {
+		if !options.PrintStats {
+			return nil, nil
+		}
+		statsDirectory, err := os.Open(c.bundlePath())
+		if err != nil {
+			return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
+		}
+
+		restoreStatistics, err := stats.CriuGetRestoreStats(statsDirectory)
+		if err != nil {
+			return nil, errors.Wrap(err, "Displaying restore statistics not possible")
		}
+
+		return &define.CRIUCheckpointRestoreStatistics{
+			PagesCompared:   restoreStatistics.GetPagesCompared(),
+			PagesSkippedCow: restoreStatistics.GetPagesSkippedCow(),
+			ForkingTime:     restoreStatistics.GetForkingTime(),
+			RestoreTime:     restoreStatistics.GetRestoreTime(),
+			PagesRestored:   restoreStatistics.GetPagesRestored(),
+		}, nil
+	}()
+	if err != nil {
+		return nil, 0, err
	}

	logrus.Debugf("Restored container %s", c.ID())

	c.state.State = define.ContainerStateRunning
+	c.state.Checkpointed = false
+	c.state.Restored = true
+	c.state.CheckpointedTime = time.Time{}
+	c.state.RestoredTime = time.Now()

	if !options.Keep {
		// Delete all checkpoint related files.
At this point, in theory, all files @@ -1550,15 +1955,21 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti if err != nil { logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err) } + c.state.CheckpointPath = "" err = os.RemoveAll(c.PreCheckPointPath()) if err != nil { logrus.Debugf("Non-fatal: removal of pre-checkpoint directory (%s) failed: %v", c.PreCheckPointPath(), err) } + err = os.RemoveAll(c.CheckpointVolumesPath()) + if err != nil { + logrus.Debugf("Non-fatal: removal of checkpoint volumes directory (%s) failed: %v", c.CheckpointVolumesPath(), err) + } cleanup := [...]string{ "restore.log", "dump.log", - "stats-dump", - "stats-restore", + stats.StatsDump, + stats.StatsRestore, + metadata.DevShmCheckpointTar, metadata.NetworkStatusFile, metadata.RootFsDiffTar, metadata.DeletedFilesFile, @@ -1570,9 +1981,11 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti logrus.Debugf("Non-fatal: removal of checkpoint file (%s) failed: %v", file, err) } } + c.state.CheckpointLog = "" + c.state.RestoreLog = "" } - return c.save() + return criuStatistics, runtimeRestoreDuration, c.save() } // Retrieves a container's "root" net namespace container dependency. @@ -1603,6 +2016,17 @@ func (c *Container) getRootNetNsDepCtr() (depCtr *Container, err error) { return depCtr, nil } +// Ensure standard bind mounts are mounted into all root directories (including chroot directories) +func (c *Container) mountIntoRootDirs(mountName string, mountPath string) error { + c.state.BindMounts[mountName] = mountPath + + for _, chrootDir := range c.config.ChrootDirs { + c.state.BindMounts[filepath.Join(chrootDir, mountName)] = mountPath + } + + return nil +} + // Make standard bind mounts to include in the container func (c *Container) makeBindMounts() error { if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil { @@ -1656,27 +2080,37 @@ func (c *Container) makeBindMounts() error { // If it doesn't, don't copy them resolvPath, exists := bindMounts["/etc/resolv.conf"] if !c.config.UseImageResolvConf && exists { - c.state.BindMounts["/etc/resolv.conf"] = resolvPath + err := c.mountIntoRootDirs("/etc/resolv.conf", resolvPath) + + if err != nil { + return errors.Wrapf(err, "error assigning mounts to container %s", c.ID()) + } } // check if dependency container has an /etc/hosts file. // It may not have one, so only use it if it does. 
-	hostsPath, exists := bindMounts["/etc/hosts"]
+	hostsPath, exists := bindMounts[config.DefaultHostsFile]
	if !c.config.UseImageHosts && exists {
-		depCtr.lock.Lock()
-		// generate a hosts file for the dependency container,
-		// based on either its old hosts file, or the default,
-		// and add the relevant information from the new container (hosts and IP)
-		hostsPath, err = depCtr.appendHosts(hostsPath, c)
+		// we cannot use the dependency container lock due to ABBA deadlocks in cleanup()
+		lock, err := lockfile.GetLockfile(hostsPath)
+		if err != nil {
+			return fmt.Errorf("failed to lock hosts file: %w", err)
+		}
+		lock.Lock()
+		// add the newly added container to the hosts file
+		// we always use 127.0.0.1 as the IP since both containers share the same netns
+		err = etchosts.Add(hostsPath, getLocalhostHostEntry(c))
+		lock.Unlock()
		if err != nil {
-			depCtr.lock.Unlock()
			return errors.Wrapf(err, "error creating hosts file for container %s which depends on container %s", c.ID(), depCtr.ID())
		}
-		depCtr.lock.Unlock()

		// finally, save it in the new container
-		c.state.BindMounts["/etc/hosts"] = hostsPath
+		err = c.mountIntoRootDirs(config.DefaultHostsFile, hostsPath)
+		if err != nil {
+			return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
+		}
	}

	if !hasCurrentUserMapped(c) {
@@ -1689,61 +2123,57 @@ func (c *Container) makeBindMounts() error {
		}
	} else {
		if !c.config.UseImageResolvConf {
-			newResolv, err := c.generateResolvConf()
-			if err != nil {
+			if err := c.generateResolvConf(); err != nil {
				return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
			}
-			c.state.BindMounts["/etc/resolv.conf"] = newResolv
		}

		if !c.config.UseImageHosts {
-			newHosts, err := c.generateHosts("/etc/hosts")
-			if err != nil {
+			if err := c.createHosts(); err != nil {
				return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
			}
-			c.state.BindMounts["/etc/hosts"] = newHosts
		}
	}

	if c.state.BindMounts["/etc/hosts"] != "" {
-		if err := label.Relabel(c.state.BindMounts["/etc/hosts"], c.config.MountLabel, true); err != nil {
+		if err := c.relabel(c.state.BindMounts["/etc/hosts"], c.config.MountLabel, true); err != nil {
			return err
		}
	}

	if c.state.BindMounts["/etc/resolv.conf"] != "" {
-		if err := label.Relabel(c.state.BindMounts["/etc/resolv.conf"], c.config.MountLabel, true); err != nil {
+		if err := c.relabel(c.state.BindMounts["/etc/resolv.conf"], c.config.MountLabel, true); err != nil {
			return err
		}
	}
-	} else {
-		if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" {
-			newHosts, err := c.generateHosts("/etc/hosts")
-			if err != nil {
-				return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
-			}
-			c.state.BindMounts["/etc/hosts"] = newHosts
+	} else if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" {
+		if err := c.createHosts(); err != nil {
+			return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
		}
	}

-	// SHM is always added when we mount the container
-	c.state.BindMounts["/dev/shm"] = c.config.ShmDir
-
-	newPasswd, newGroup, err := c.generatePasswdAndGroup()
-	if err != nil {
-		return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
+	if c.config.ShmDir != "" {
+		// If ShmDir has a value, SHM is always added when we mount the container
+		c.state.BindMounts["/dev/shm"] = c.config.ShmDir
	}
-	if newPasswd != "" {
-		// Make /etc/passwd
-		// If it already exists, delete so we can recreate
-		delete(c.state.BindMounts, "/etc/passwd")
-		c.state.BindMounts["/etc/passwd"] =
newPasswd - } - if newGroup != "" { - // Make /etc/group - // If it already exists, delete so we can recreate - delete(c.state.BindMounts, "/etc/group") - c.state.BindMounts["/etc/group"] = newGroup + + if c.config.Passwd == nil || *c.config.Passwd { + newPasswd, newGroup, err := c.generatePasswdAndGroup() + if err != nil { + return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID()) + } + if newPasswd != "" { + // Make /etc/passwd + // If it already exists, delete so we can recreate + delete(c.state.BindMounts, "/etc/passwd") + c.state.BindMounts["/etc/passwd"] = newPasswd + } + if newGroup != "" { + // Make /etc/group + // If it already exists, delete so we can recreate + delete(c.state.BindMounts, "/etc/group") + c.state.BindMounts["/etc/group"] = newGroup + } } // Make /etc/hostname @@ -1833,8 +2263,17 @@ rootless=%d return errors.Wrapf(err, "error creating secrets mount") } for _, secret := range c.Secrets() { + secretFileName := secret.Name + base := "/run/secrets" + if secret.Target != "" { + secretFileName = secret.Target + // If absolute path for target given remove base. + if filepath.IsAbs(secretFileName) { + base = "" + } + } src := filepath.Join(c.config.SecretsPath, secret.Name) - dest := filepath.Join("/run/secrets", secret.Name) + dest := filepath.Join(base, secretFileName) c.state.BindMounts[dest] = src } } @@ -1843,23 +2282,25 @@ rootless=%d } // generateResolvConf generates a containers resolv.conf -func (c *Container) generateResolvConf() (string, error) { +func (c *Container) generateResolvConf() error { var ( nameservers []string networkNameServers []string networkSearchDomains []string ) + hostns := true resolvConf := "/etc/resolv.conf" for _, namespace := range c.config.Spec.Linux.Namespaces { if namespace.Type == spec.NetworkNamespace { + hostns = false if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") { definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf") _, err := os.Stat(definedPath) if err == nil { resolvConf = definedPath } else if !os.IsNotExist(err) { - return "", err + return err } } break @@ -1869,36 +2310,25 @@ func (c *Container) generateResolvConf() (string, error) { contents, err := ioutil.ReadFile(resolvConf) // resolv.conf doesn't have to exists if err != nil && !os.IsNotExist(err) { - return "", err + return err } ns := resolvconf.GetNameservers(contents) // check if systemd-resolved is used, assume it is used when 127.0.0.53 is the only nameserver - if len(ns) == 1 && ns[0] == "127.0.0.53" { + if !hostns && len(ns) == 1 && ns[0] == "127.0.0.53" { // read the actual resolv.conf file for systemd-resolved resolvedContents, err := ioutil.ReadFile("/run/systemd/resolve/resolv.conf") if err != nil { if !os.IsNotExist(err) { - return "", errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf") + return errors.Wrapf(err, "detected that systemd-resolved is in use, but could not locate real resolv.conf") } } else { contents = resolvedContents } } - ipv6 := false - // If network status is set check for ipv6 and dns namesevers netStatus := c.getNetworkStatus() for _, status := range netStatus { - for _, netInt := range status.Interfaces { - for _, netAddress := range netInt.Networks { - // Note: only using To16() does not work since it also returns a valid ip for ipv4 - if netAddress.Subnet.IP.To4() == nil && netAddress.Subnet.IP.To16() != nil { - ipv6 = true - } - } - } - if status.DNSServerIPs != nil { for _, nsIP := range 
status.DNSServerIPs { networkNameServers = append(networkNameServers, nsIP.String()) @@ -1911,67 +2341,60 @@ func (c *Container) generateResolvConf() (string, error) { } } - if c.config.NetMode.IsSlirp4netns() { - ctrNetworkSlipOpts := []string{} - if c.config.NetworkOptions != nil { - ctrNetworkSlipOpts = append(ctrNetworkSlipOpts, c.config.NetworkOptions["slirp4netns"]...) - } - slirpOpts, err := parseSlirp4netnsNetworkOptions(c.runtime, ctrNetworkSlipOpts) - if err != nil { - return "", err - } - ipv6 = slirpOpts.enableIPv6 + ipv6, err := c.checkForIPv6(netStatus) + if err != nil { + return err } // Ensure that the container's /etc/resolv.conf is compatible with its // network configuration. - resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, c.config.CreateNetNS) + resolv, err := resolvconf.FilterResolvDNS(contents, ipv6, !hostns) if err != nil { - return "", errors.Wrapf(err, "error parsing host resolv.conf") + return errors.Wrapf(err, "error parsing host resolv.conf") } - dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers)) + dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers)+len(c.config.DNSServer)) for _, i := range c.runtime.config.Containers.DNSServers { result := net.ParseIP(i) if result == nil { - return "", errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i) + return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i) } dns = append(dns, result) } - dnsServers := append(dns, c.config.DNSServer...) + dns = append(dns, c.config.DNSServer...) // If the user provided dns, it trumps all; then dns masq; then resolv.conf + var search []string switch { - case len(dnsServers) > 0: - + case len(dns) > 0: // We store DNS servers as net.IP, so need to convert to string - for _, server := range dnsServers { + for _, server := range dns { nameservers = append(nameservers, server.String()) } - case len(networkNameServers) > 0: - nameservers = append(nameservers, networkNameServers...) default: // Make a new resolv.conf - nameservers = resolvconf.GetNameservers(resolv.Content) - // slirp4netns has a built in DNS server. + // first add the nameservers from the networks status + nameservers = append(nameservers, networkNameServers...) + // when we add network dns server we also have to add the search domains + search = networkSearchDomains + // slirp4netns has a built in DNS forwarder. if c.config.NetMode.IsSlirp4netns() { slirp4netnsDNS, err := GetSlirp4netnsDNS(c.slirp4netnsSubnet) if err != nil { logrus.Warn("Failed to determine Slirp4netns DNS: ", err.Error()) } else { - nameservers = append([]string{slirp4netnsDNS.String()}, nameservers...) + nameservers = append(nameservers, slirp4netnsDNS.String()) } } + nameservers = append(nameservers, resolvconf.GetNameservers(resolv.Content)...) } - var search []string - if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 || len(networkSearchDomains) > 0 { + if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 { if !util.StringInSlice(".", c.config.DNSSearch) { - search = c.runtime.config.Containers.DNSSearches + search = append(search, c.runtime.config.Containers.DNSSearches...) search = append(search, c.config.DNSSearch...) - search = append(search, networkSearchDomains...) } } else { - search = resolvconf.GetSearchDomains(resolv.Content) + search = append(search, resolvconf.GetSearchDomains(resolv.Content)...) 
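Summarizing the nameserver precedence implemented above as a stand-alone sketch — pickNameservers is hypothetical; the ordering mirrors the switch: user-configured servers win outright, otherwise network-provided servers plus the slirp4netns forwarder come before the host's resolv.conf entries:

package main

import "fmt"

// pickNameservers merges DNS sources in the precedence used above.
func pickNameservers(userDNS, networkDNS, hostDNS []string, slirpDNS string) []string {
	if len(userDNS) > 0 {
		return userDNS // explicitly configured servers trump everything
	}
	ns := append([]string{}, networkDNS...)
	if slirpDNS != "" {
		ns = append(ns, slirpDNS) // the slirp4netns built-in forwarder
	}
	return append(ns, hostDNS...) // host resolv.conf entries come last
}

func main() {
	fmt.Println(pickNameservers(nil, []string{"10.89.0.1"}, []string{"192.168.1.1"}, "10.0.2.3"))
}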
} var options []string @@ -1985,140 +2408,204 @@ func (c *Container) generateResolvConf() (string, error) { destPath := filepath.Join(c.state.RunDir, "resolv.conf") if err := os.Remove(destPath); err != nil && !os.IsNotExist(err) { - return "", errors.Wrapf(err, "container %s", c.ID()) + return errors.Wrapf(err, "container %s", c.ID()) } // Build resolv.conf if _, err = resolvconf.Build(destPath, nameservers, search, options); err != nil { - return "", errors.Wrapf(err, "error building resolv.conf for container %s", c.ID()) + return errors.Wrapf(err, "error building resolv.conf for container %s", c.ID()) } - // Relabel resolv.conf for the container - if err := label.Relabel(destPath, c.config.MountLabel, true); err != nil { - return "", err + return c.bindMountRootFile(destPath, "/etc/resolv.conf") +} + +// Check if a container uses IPv6. +func (c *Container) checkForIPv6(netStatus map[string]types.StatusBlock) (bool, error) { + for _, status := range netStatus { + for _, netInt := range status.Interfaces { + for _, netAddress := range netInt.Subnets { + // Note: only using To16() does not work since it also returns a valid ip for ipv4 + if netAddress.IPNet.IP.To4() == nil && netAddress.IPNet.IP.To16() != nil { + return true, nil + } + } + } + } + + if c.config.NetMode.IsSlirp4netns() { + ctrNetworkSlipOpts := []string{} + if c.config.NetworkOptions != nil { + ctrNetworkSlipOpts = append(ctrNetworkSlipOpts, c.config.NetworkOptions["slirp4netns"]...) + } + slirpOpts, err := parseSlirp4netnsNetworkOptions(c.runtime, ctrNetworkSlipOpts) + if err != nil { + return false, err + } + return slirpOpts.enableIPv6, nil } - return destPath, nil + return false, nil } -// generateHosts creates a containers hosts file -func (c *Container) generateHosts(path string) (string, error) { - orig, err := ioutil.ReadFile(path) +// Add a new nameserver to the container's resolv.conf, ensuring that it is the +// first nameserver present. +// Usable only with running containers. +func (c *Container) addNameserver(ips []string) error { + // Take no action if container is not running. + if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) { + return nil + } + + // Do we have a resolv.conf at all? + path, ok := c.state.BindMounts["/etc/resolv.conf"] + if !ok { + return nil + } + + // Read in full contents, parse out existing nameservers + contents, err := ioutil.ReadFile(path) if err != nil { - return "", err + return err } - hosts := string(orig) - hosts += c.getHosts() + ns := resolvconf.GetNameservers(contents) + options := resolvconf.GetOptions(contents) + search := resolvconf.GetSearchDomains(contents) - hosts = c.appendLocalhost(hosts) + // We could verify that it doesn't already exist + // but extra nameservers shouldn't harm anything. + // Ensure we are the first entry in resolv.conf though, otherwise we + // might be after user-added servers. + ns = append(ips, ns...) - return c.writeStringToRundir("hosts", hosts) + // We're rewriting the container's resolv.conf as part of this, but we + // hold the container lock, so there should be no risk of parallel + // modification. + if _, err := resolvconf.Build(path, ns, search, options); err != nil { + return errors.Wrapf(err, "error adding new nameserver to container %s resolv.conf", c.ID()) + } + + return nil } -// based on networking mode we may want to append the localhost -// if there isn't any record for it and also this shoud happen -// in slirp4netns and similar network modes. 
-func (c *Container) appendLocalhost(hosts string) string { - if !strings.Contains(hosts, "localhost") && - !c.config.NetMode.IsHost() { - hosts += "127.0.0.1\tlocalhost\n::1\tlocalhost\n" +// Remove an entry from the existing resolv.conf of the container. +// Usable only with running containers. +func (c *Container) removeNameserver(ips []string) error { + // Take no action if container is not running. + if !c.ensureState(define.ContainerStateRunning, define.ContainerStateCreated) { + return nil } - return hosts -} + // Do we have a resolv.conf at all? + path, ok := c.state.BindMounts["/etc/resolv.conf"] + if !ok { + return nil + } -// appendHosts appends a container's config and state pertaining to hosts to a container's -// local hosts file. netCtr is the container from which the netNS information is -// taken. -// path is the basis of the hosts file, into which netCtr's netNS information will be appended. -// FIXME. Path should be used by this function,but I am not sure what is correct; remove //lint -// once this is fixed -func (c *Container) appendHosts(path string, netCtr *Container) (string, error) { //nolint - return c.appendStringToRunDir("hosts", netCtr.getHosts()) -} + // Read in full contents, parse out existing nameservers + contents, err := ioutil.ReadFile(path) + if err != nil { + return err + } + ns := resolvconf.GetNameservers(contents) + options := resolvconf.GetOptions(contents) + search := resolvconf.GetSearchDomains(contents) -// getHosts finds the pertinent information for a container's host file in its config and state -// and returns a string in a format that can be written to the host file -func (c *Container) getHosts() string { - var hosts string + toRemove := make(map[string]bool) + for _, ip := range ips { + toRemove[ip] = true + } - if len(c.config.HostAdd) > 0 { - for _, host := range c.config.HostAdd { - // the host format has already been verified at this point - fields := strings.SplitN(host, ":", 2) - hosts += fmt.Sprintf("%s %s\n", fields[1], fields[0]) + newNS := make([]string, 0, len(ns)) + for _, server := range ns { + if !toRemove[server] { + newNS = append(newNS, server) } } - hosts += c.cniHosts() + if _, err := resolvconf.Build(path, newNS, search, options); err != nil { + return errors.Wrapf(err, "error removing nameservers from container %s resolv.conf", c.ID()) + } - // Add hostname for slirp4netns - if c.Hostname() != "" { - if c.config.NetMode.IsSlirp4netns() { - // When using slirp4netns, the interface gets a static IP - slirp4netnsIP, err := GetSlirp4netnsIP(c.slirp4netnsSubnet) - if err != nil { - logrus.Warnf("Failed to determine slirp4netnsIP: %v", err.Error()) - } else { - hosts += fmt.Sprintf("# used by slirp4netns\n%s\t%s %s\n", slirp4netnsIP.String(), c.Hostname(), c.config.Name) - } - } + return nil +} - // Do we have a network namespace? - netNone := false - if c.config.NetNsCtr == "" && !c.config.CreateNetNS { +func getLocalhostHostEntry(c *Container) etchosts.HostEntries { + return etchosts.HostEntries{{IP: "127.0.0.1", Names: []string{c.Hostname(), c.config.Name}}} +} + +// getHostsEntries returns the container ip host entries for the correct netmode +func (c *Container) getHostsEntries() (etchosts.HostEntries, error) { + var entries etchosts.HostEntries + names := []string{c.Hostname(), c.config.Name} + switch { + case c.config.NetMode.IsBridge(): + entries = etchosts.GetNetworkHostEntries(c.state.NetworkStatus, names...) 
+ case c.config.NetMode.IsSlirp4netns(): + ip, err := GetSlirp4netnsIP(c.slirp4netnsSubnet) + if err != nil { + return nil, err + } + entries = etchosts.HostEntries{{IP: ip.String(), Names: names}} + default: + // check for net=none + if !c.config.CreateNetNS { for _, ns := range c.config.Spec.Linux.Namespaces { if ns.Type == spec.NetworkNamespace { if ns.Path == "" { - netNone = true + entries = etchosts.HostEntries{{IP: "127.0.0.1", Names: names}} } break } } } - // If we are net=none (have a network namespace, but not connected to - // anything) add the container's name and hostname to localhost. - if netNone { - hosts += fmt.Sprintf("127.0.0.1 %s %s\n", c.Hostname(), c.config.Name) - } } + return entries, nil +} - // Add gateway entry if we are not in a machine. If we use podman machine - // the gvproxy dns server will take care of host.containers.internal. - // https://github.com/containers/gvisor-tap-vsock/commit/1108ea45162281046d239047a6db9bc187e64b08 - if !c.runtime.config.Engine.MachineEnabled { - var depCtr *Container - netStatus := c.getNetworkStatus() - if c.config.NetNsCtr != "" { - // ignoring the error because there isn't anything to do - depCtr, _ = c.getRootNetNsDepCtr() - } else if len(netStatus) != 0 { - depCtr = c - } - - if depCtr != nil { - for _, status := range depCtr.getNetworkStatus() { - for _, netInt := range status.Interfaces { - for _, netAddress := range netInt.Networks { - if netAddress.Gateway != nil { - hosts += fmt.Sprintf("%s host.containers.internal\n", netAddress.Gateway.String()) - } - } - } - } - } else if c.config.NetMode.IsSlirp4netns() { - gatewayIP, err := GetSlirp4netnsGateway(c.slirp4netnsSubnet) - if err != nil { - logrus.Warn("Failed to determine gatewayIP: ", err.Error()) - } else { - hosts += fmt.Sprintf("%s host.containers.internal\n", gatewayIP.String()) - } - } else { - logrus.Debug("Network configuration does not support host.containers.internal address") +func (c *Container) createHosts() error { + var containerIPsEntries etchosts.HostEntries + var err error + // if we configure the netns after the container create we should not add + // the hosts here since we have no information about the actual ips + // instead we will add them in c.completeNetworkSetup() + if !c.config.PostConfigureNetNS { + containerIPsEntries, err = c.getHostsEntries() + if err != nil { + return fmt.Errorf("failed to get container ip host entries: %w", err) } } + baseHostFile, err := etchosts.GetBaseHostFile(c.runtime.config.Containers.BaseHostsFile, c.state.Mountpoint) + if err != nil { + return err + } + + targetFile := filepath.Join(c.state.RunDir, "hosts") + err = etchosts.New(&etchosts.Params{ + BaseFile: baseHostFile, + ExtraHosts: c.config.HostAdd, + ContainerIPs: containerIPsEntries, + HostContainersInternalIP: etchosts.GetHostContainersInternalIP(c.runtime.config, c.state.NetworkStatus, c.runtime.network), + TargetFile: targetFile, + }) + if err != nil { + return err + } + + return c.bindMountRootFile(targetFile, config.DefaultHostsFile) +} + +// bindMountRootFile will chown and relabel the source file to make it usable in the container. +// It will also add the path to the container bind mount map. +// source is the path on the host, dest is the path in the container. 
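Before `bindMountRootFile` below: the new `createHosts` above delegates all /etc/hosts assembly to the containers/common `etchosts` package. A hedged sketch of that call shape, using only identifiers visible in this diff; the import path is assumed to be the c/common one, the paths and addresses are hypothetical, and the `host.containers.internal` entry (filled in by `GetHostContainersInternalIP` in the real call) is omitted for brevity:

```go
package main

import (
	"log"

	"github.com/containers/common/libnetwork/etchosts" // assumed import path
)

func main() {
	// The container's own addresses, analogous to what getHostsEntries
	// above produces for the bridge/slirp4netns/net=none cases.
	containerIPs := etchosts.HostEntries{
		{IP: "10.88.0.2", Names: []string{"myctr", "myhostname"}},
	}
	err := etchosts.New(&etchosts.Params{
		BaseFile:     "/etc/hosts",                    // template, cf. GetBaseHostFile
		ExtraHosts:   []string{"db:192.168.1.5"},      // --add-host style entries
		ContainerIPs: containerIPs,
		TargetFile:   "/run/containers/hosts.example", // hypothetical target path
	})
	if err != nil {
		log.Fatal(err)
	}
}
```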
+func (c *Container) bindMountRootFile(source, dest string) error {
+	if err := os.Chown(source, c.RootUID(), c.RootGID()); err != nil {
+		return err
+	}
+	if err := label.Relabel(source, c.MountLabel(), false); err != nil {
+		return err
+	}
-	return hosts
+	return c.mountIntoRootDirs(dest, source)
 }
 
 // generateGroupEntry generates an entry or entries into /etc/group as
@@ -2146,7 +2633,7 @@ func (c *Container) generateGroupEntry() (string, error) {
 		addedGID = gid
 	}
 	if c.config.User != "" {
-		entry, _, err := c.generateUserGroupEntry(addedGID)
+		entry, err := c.generateUserGroupEntry(addedGID)
 		if err != nil {
 			return "", err
 		}
@@ -2199,9 +2686,9 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
 
 // Make an entry in /etc/group for the group the container was specified to run
 // as.
-func (c *Container) generateUserGroupEntry(addedGID int) (string, int, error) {
+func (c *Container) generateUserGroupEntry(addedGID int) (string, error) {
 	if c.config.User == "" {
-		return "", 0, nil
+		return "", nil
 	}
 
 	splitUser := strings.SplitN(c.config.User, ":", 2)
@@ -2212,20 +2699,20 @@ func (c *Container) generateUserGroupEntry(addedGID int) (string, int, error) {
 
 	gid, err := strconv.ParseUint(group, 10, 32)
 	if err != nil {
-		return "", 0, nil
+		return "", nil // nolint: nilerr
 	}
 
 	if addedGID != 0 && addedGID == int(gid) {
-		return "", 0, nil
+		return "", nil
 	}
 
 	// Check if the group already exists
 	_, err = lookup.GetGroup(c.state.Mountpoint, group)
 	if err != runcuser.ErrNoGroupEntries {
-		return "", 0, err
+		return "", err
 	}
 
-	return fmt.Sprintf("%d:x:%d:%s\n", gid, gid, splitUser[0]), int(gid), nil
+	return fmt.Sprintf("%d:x:%d:%s\n", gid, gid, splitUser[0]), nil
 }
 
 // generatePasswdEntry generates an entry or entries into /etc/passwd as
@@ -2237,12 +2724,25 @@ func (c *Container) generateUserGroupEntry(addedGID int) (string, int, error) {
 // /etc/passwd via AddCurrentUserPasswdEntry (though this does not trigger if
 // the user in question already exists in /etc/passwd) or the UID to be added
 // is 0).
+// 3. The user specified additional host user accounts to add to the /etc/passwd file.
 // Returns password entry (as a string that can be appended to /etc/passwd) and
 // any error that occurred.
 func (c *Container) generatePasswdEntry() (string, error) {
 	passwdString := ""
 
 	addedUID := 0
+	for _, userid := range c.config.HostUsers {
+		// Look up the user on the host.
+		u, err := util.LookupUser(userid)
+		if err != nil {
+			return "", err
+		}
+		entry, err := c.userPasswdEntry(u)
+		if err != nil {
+			return "", err
+		}
+		passwdString += entry
+	}
 	if c.config.AddCurrentUserPasswdEntry {
 		entry, uid, _, err := c.generateCurrentUserPasswdEntry()
 		if err != nil {
@@ -2252,7 +2752,7 @@ func (c *Container) generatePasswdEntry() (string, error) {
 		addedUID = uid
 	}
 	if c.config.User != "" {
-		entry, _, _, err := c.generateUserPasswdEntry(addedUID)
+		entry, err := c.generateUserPasswdEntry(addedUID)
 		if err != nil {
 			return "", err
 		}
@@ -2275,17 +2775,25 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
 	if err != nil {
 		return "", 0, 0, errors.Wrapf(err, "failed to get current user")
 	}
+	pwd, err := c.userPasswdEntry(u)
+	if err != nil {
+		return "", 0, 0, err
+	}
+
+	return pwd, uid, rootless.GetRootlessGID(), nil
+}
+
+func (c *Container) userPasswdEntry(u *user.User) (string, error) {
 	// Lookup the user to see if it exists in the container image.
- _, err = lookup.GetUser(c.state.Mountpoint, u.Username) + _, err := lookup.GetUser(c.state.Mountpoint, u.Username) if err != runcuser.ErrNoPasswdEntries { - return "", 0, 0, err + return "", err } // Lookup the UID to see if it exists in the container image. _, err = lookup.GetUser(c.state.Mountpoint, u.Uid) if err != runcuser.ErrNoPasswdEntries { - return "", 0, 0, err + return "", err } // If the user's actual home directory exists, or was mounted in - use @@ -2318,8 +2826,11 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) { if !hasHomeSet { c.config.Spec.Process.Env = append(c.config.Spec.Process.Env, fmt.Sprintf("HOME=%s", homeDir)) } + if c.config.PasswdEntry != "" { + return c.passwdEntry(u.Username, u.Uid, u.Gid, u.Name, homeDir), nil + } - return fmt.Sprintf("%s:*:%s:%s:%s:%s:/bin/sh\n", u.Username, u.Uid, u.Gid, u.Name, homeDir), uid, rootless.GetRootlessGID(), nil + return fmt.Sprintf("%s:*:%s:%s:%s:%s:/bin/sh\n", u.Username, u.Uid, u.Gid, u.Name, homeDir), nil } // generateUserPasswdEntry generates an /etc/passwd entry for the container user @@ -2328,13 +2839,13 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) { // Accepts one argument, that being any UID that has already been added to the // passwd file by other functions; if it matches the UID we were given, we don't // need to do anything. -func (c *Container) generateUserPasswdEntry(addedUID int) (string, int, int, error) { +func (c *Container) generateUserPasswdEntry(addedUID int) (string, error) { var ( groupspec string gid int ) if c.config.User == "" { - return "", 0, 0, nil + return "", nil } splitSpec := strings.SplitN(c.config.User, ":", 2) userspec := splitSpec[0] @@ -2344,17 +2855,17 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, int, int, err // If a non numeric User, then don't generate passwd uid, err := strconv.ParseUint(userspec, 10, 32) if err != nil { - return "", 0, 0, nil + return "", nil // nolint: nilerr } if addedUID != 0 && int(uid) == addedUID { - return "", 0, 0, nil + return "", nil } // Lookup the user to see if it exists in the container image _, err = lookup.GetUser(c.state.Mountpoint, userspec) if err != runcuser.ErrNoPasswdEntries { - return "", 0, 0, err + return "", err } if groupspec != "" { @@ -2364,17 +2875,33 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, int, int, err } else { group, err := lookup.GetGroup(c.state.Mountpoint, groupspec) if err != nil { - return "", 0, 0, errors.Wrapf(err, "unable to get gid %s from group file", groupspec) + return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec) } gid = group.Gid } } - return fmt.Sprintf("%d:*:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, c.WorkingDir()), int(uid), gid, nil + + if c.config.PasswdEntry != "" { + entry := c.passwdEntry(fmt.Sprintf("%d", uid), fmt.Sprintf("%d", uid), fmt.Sprintf("%d", gid), "container user", c.WorkingDir()) + return entry, nil + } + + return fmt.Sprintf("%d:*:%d:%d:container user:%s:/bin/sh\n", uid, uid, gid, c.WorkingDir()), nil +} + +func (c *Container) passwdEntry(username string, uid, gid, name, homeDir string) string { + s := c.config.PasswdEntry + s = strings.ReplaceAll(s, "$USERNAME", username) + s = strings.ReplaceAll(s, "$UID", uid) + s = strings.ReplaceAll(s, "$GID", gid) + s = strings.ReplaceAll(s, "$NAME", name) + s = strings.ReplaceAll(s, "$HOME", homeDir) + return s + "\n" } // generatePasswdAndGroup generates container-specific passwd and group files 
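The new `passwdEntry` template expansion above (exposed through `c.config.PasswdEntry`) is plain literal substitution with `strings.ReplaceAll`; there is no escaping and only the five placeholders shown are recognized. A self-contained sketch with made-up values:

```go
package main

import (
	"fmt"
	"strings"
)

// expandPasswdEntry mirrors passwdEntry above: each placeholder is a
// literal token, replaced everywhere it occurs; unknown tokens pass
// through untouched.
func expandPasswdEntry(tmpl, username, uid, gid, name, home string) string {
	s := tmpl
	s = strings.ReplaceAll(s, "$USERNAME", username)
	s = strings.ReplaceAll(s, "$UID", uid)
	s = strings.ReplaceAll(s, "$GID", gid)
	s = strings.ReplaceAll(s, "$NAME", name)
	s = strings.ReplaceAll(s, "$HOME", home)
	return s + "\n"
}

func main() {
	fmt.Print(expandPasswdEntry(
		"$USERNAME:*:$UID:$GID:$NAME:$HOME:/bin/sh",
		"alice", "1000", "1000", "container user", "/home/alice"))
	// Output: alice:*:1000:1000:container user:/home/alice:/bin/sh
}
```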
// iff g.config.User is a number or we are configured to make a passwd entry for
-// the current user.
+// the current user or the user specified HostUsers.
 // Returns path to file to mount at /etc/passwd, path to file to mount at
 // /etc/group, and any error that occurred. If no passwd/group file were
 // required, the empty string will be returned for those path (this may occur
 // with a bind mount). This is done in cases where the container is *not*
 // read-only. In this case, the function will return nothing ("", "", nil).
 func (c *Container) generatePasswdAndGroup() (string, string, error) {
-	if !c.config.AddCurrentUserPasswdEntry && c.config.User == "" {
+	if !c.config.AddCurrentUserPasswdEntry && c.config.User == "" &&
+		len(c.config.HostUsers) == 0 {
 		return "", "", nil
 	}
 
@@ -2551,6 +3079,24 @@ func isRootlessCgroupSet(cgroup string) bool {
 	return cgroup != CgroupfsDefaultCgroupParent && filepath.Dir(cgroup) != CgroupfsDefaultCgroupParent
 }
 
+func (c *Container) expectPodCgroup() (bool, error) {
+	unified, err := cgroups.IsCgroup2UnifiedMode()
+	if err != nil {
+		return false, err
+	}
+	cgroupManager := c.CgroupManager()
+	switch {
+	case c.config.NoCgroups:
+		return false, nil
+	case cgroupManager == config.SystemdCgroupsManager:
+		return !rootless.IsRootless() || unified, nil
+	case cgroupManager == config.CgroupfsCgroupsManager:
+		return !rootless.IsRootless(), nil
+	default:
+		return false, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup mode %s requested for pods", cgroupManager)
+	}
+}
+
 // Get cgroup path in a format suitable for the OCI spec
 func (c *Container) getOCICgroupPath() (string, error) {
 	unified, err := cgroups.IsCgroup2UnifiedMode()
@@ -2566,13 +3112,13 @@ func (c *Container) getOCICgroupPath() (string, error) {
 		if err != nil {
 			return "", err
 		}
-		return filepath.Join(selfCgroup, "container"), nil
+		return filepath.Join(selfCgroup, fmt.Sprintf("libpod-payload-%s", c.ID())), nil
 	case cgroupManager == config.SystemdCgroupsManager:
 		// When the OCI runtime is set to use Systemd as a cgroup manager, it
 		// expects cgroups to be passed as follows:
 		// slice:prefix:name
 		systemdCgroups := fmt.Sprintf("%s:libpod:%s", path.Base(c.config.CgroupParent), c.ID())
-		logrus.Debugf("Setting CGroups for container %s to %s", c.ID(), systemdCgroups)
+		logrus.Debugf("Setting Cgroups for container %s to %s", c.ID(), systemdCgroups)
 		return systemdCgroups, nil
 	case (rootless.IsRootless() && (cgroupManager == config.CgroupfsCgroupsManager || !unified)):
 		if c.config.CgroupParent == "" || !isRootlessCgroupSet(c.config.CgroupParent) {
@@ -2581,7 +3127,7 @@
 		fallthrough
 	case cgroupManager == config.CgroupfsCgroupsManager:
 		cgroupPath := filepath.Join(c.config.CgroupParent, fmt.Sprintf("libpod-%s", c.ID()))
-		logrus.Debugf("Setting CGroup path for container %s to %s", c.ID(), cgroupPath)
+		logrus.Debugf("Setting Cgroup path for container %s to %s", c.ID(), cgroupPath)
 		return cgroupPath, nil
 	default:
 		return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", cgroupManager)
@@ -2589,7 +3135,7 @@
 }
 
 func (c *Container) copyTimezoneFile(zonePath string) (string, error) {
-	var localtimeCopy string = filepath.Join(c.state.RunDir, "localtime")
+	localtimeCopy := filepath.Join(c.state.RunDir, "localtime")
 	file, err := os.Stat(zonePath)
 	if err != nil {
 		return "", err
 	}
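The `getOCICgroupPath` hunk above produces differently shaped values depending on the cgroup manager, and the conmon-delegated case now moves the workload into a per-container `libpod-payload-<ID>` group. A quick illustration of the two classic formats (the ID is made up):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	const id = "0123456789abcdef" // hypothetical container ID

	// systemd cgroup manager: the OCI runtime expects "slice:prefix:name".
	systemd := fmt.Sprintf("%s:libpod:%s", path.Base("/machine.slice"), id)

	// cgroupfs manager: a plain path under the configured parent.
	cgroupfs := filepath.Join("/machine.slice", fmt.Sprintf("libpod-%s", id))

	fmt.Println(systemd)  // machine.slice:libpod:0123456789abcdef
	fmt.Println(cgroupfs) // /machine.slice/libpod-0123456789abcdef
}
```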
@@ -2611,7 +3157,7 @@ func (c *Container) copyTimezoneFile(zonePath string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	if err := label.Relabel(localtimeCopy, c.config.MountLabel, false); err != nil {
+	if err := c.relabel(localtimeCopy, c.config.MountLabel, false); err != nil {
 		return "", err
 	}
 	if err := dest.Chown(c.RootUID(), c.RootGID()); err != nil {
@@ -2732,7 +3278,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
 			return err
 		}
 	}
-	if err := os.Chmod(mountPoint, st.Mode()|0111); err != nil {
+	if err := os.Chmod(mountPoint, st.Mode()); err != nil {
 		return err
 	}
 	stat := st.Sys().(*syscall.Stat_t)
@@ -2746,3 +3292,37 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
 	}
 	return nil
 }
+
+func (c *Container) relabel(src, mountLabel string, recurse bool) error {
+	if !selinux.GetEnabled() || mountLabel == "" {
+		return nil
+	}
+	// Only relabel on the initial creation of the container.
+	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateUnknown) {
+		label, err := label.FileLabel(src)
+		if err != nil {
+			return err
+		}
+		// If the label already matches there is nothing to do; differing
+		// labels can happen when the file is on a tmpfs.
+		if label == mountLabel {
+			return nil
+		}
+	}
+	return label.Relabel(src, mountLabel, recurse)
+}
+
+func (c *Container) ChangeHostPathOwnership(src string, recurse bool, uid, gid int) error {
+	// Only chown on the initial creation of the container.
+	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateUnknown) {
+		st, err := os.Stat(src)
+		if err != nil {
+			return err
+		}
+
+		// If the ownership already matches the requested uid/gid,
+		// there is nothing to do.
+		if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid {
+			return nil
+		}
+	}
+	return chown.ChangeHostPathOwnership(src, recurse, uid, gid)
+}
diff --git a/vendor/github.com/containers/podman/v3/libpod/container_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_linux.go
similarity index 95%
rename from vendor/github.com/containers/podman/v3/libpod/container_linux.go
rename to vendor/github.com/containers/podman/v4/libpod/container_linux.go
index c445fb8af45..8b517e69f13 100644
--- a/vendor/github.com/containers/podman/v3/libpod/container_linux.go
+++ b/vendor/github.com/containers/podman/v4/libpod/container_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package libpod
diff --git a/vendor/github.com/containers/podman/v3/libpod/container_log.go b/vendor/github.com/containers/podman/v4/libpod/container_log.go
similarity index 87%
rename from vendor/github.com/containers/podman/v3/libpod/container_log.go
rename to vendor/github.com/containers/podman/v4/libpod/container_log.go
index 18840bff25b..7a9eb2dbf21 100644
--- a/vendor/github.com/containers/podman/v3/libpod/container_log.go
+++ b/vendor/github.com/containers/podman/v4/libpod/container_log.go
@@ -6,10 +6,10 @@ import (
 	"os"
 	"time"
 
-	"github.com/containers/podman/v3/libpod/define"
-	"github.com/containers/podman/v3/libpod/events"
-	"github.com/containers/podman/v3/libpod/logs"
-	"github.com/hpcloud/tail/watch"
+	"github.com/containers/podman/v4/libpod/define"
+	"github.com/containers/podman/v4/libpod/events"
+	"github.com/containers/podman/v4/libpod/logs"
+	"github.com/nxadm/tail/watch"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -23,8 +23,8 @@
 // Log is a runtime function that can read one or more container logs.
func (r *Runtime) Log(ctx context.Context, containers []*Container, options *logs.LogOptions, logChannel chan *logs.LogLine) error { - for _, ctr := range containers { - if err := ctr.ReadLog(ctx, options, logChannel); err != nil { + for c, ctr := range containers { + if err := ctr.ReadLog(ctx, options, logChannel, int64(c)); err != nil { return err } } @@ -32,26 +32,26 @@ func (r *Runtime) Log(ctx context.Context, containers []*Container, options *log } // ReadLog reads a containers log based on the input options and returns log lines over a channel. -func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine) error { +func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error { switch c.LogDriver() { case define.PassthroughLogging: return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot read logs") case define.NoLogging: return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs") case define.JournaldLogging: - return c.readFromJournal(ctx, options, logChannel) + return c.readFromJournal(ctx, options, logChannel, colorID) case define.JSONLogging: // TODO provide a separate implementation of this when Conmon // has support. fallthrough case define.KubernetesLogging, "": - return c.readFromLogFile(ctx, options, logChannel) + return c.readFromLogFile(ctx, options, logChannel, colorID) default: return errors.Wrapf(define.ErrInternal, "unrecognized log driver %q, cannot read logs", c.LogDriver()) } } -func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine) error { +func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error { t, tailLog, err := logs.GetLogFile(c.LogPath(), options) if err != nil { // If the log file does not exist, this is not fatal. 
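`Log` above now derives a per-container `ColorID` from the container's position in the requested slice, so interleaved multi-container output can be colored consistently. A hypothetical rendering of that ID; the ANSI mapping here is illustrative, not podman's actual scheme:

```go
package main

import "fmt"

func main() {
	// ColorID is just the index in the requested slice, as in
	// `for c, ctr := range containers` above.
	ansi := []int{31, 32, 33, 34, 35, 36} // red..cyan
	containers := []string{"web", "db", "cache"}
	for i, name := range containers {
		colorID := int64(i)
		code := ansi[colorID%int64(len(ansi))]
		fmt.Printf("\x1b[%dm%s\x1b[0m ColorID=%d\n", code, name, colorID)
	}
}
```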
@@ -65,6 +65,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption for _, nll := range tailLog { nll.CID = c.ID() nll.CName = c.Name() + nll.ColorID = colorID if nll.Since(options.Since) && nll.Until(options.Until) { logChannel <- nll } @@ -97,6 +98,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption } nll.CID = c.ID() nll.CName = c.Name() + nll.ColorID = colorID if nll.Since(options.Since) && nll.Until(options.Until) { logChannel <- nll } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_log_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_log_linux.go similarity index 83% rename from vendor/github.com/containers/podman/v3/libpod/container_log_linux.go rename to vendor/github.com/containers/podman/v4/libpod/container_log_linux.go index ca1e11ef5e2..deb726526d0 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_log_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_log_linux.go @@ -1,5 +1,5 @@ -//+build linux -//+build systemd +//go:build linux && systemd +// +build linux,systemd package libpod @@ -9,9 +9,9 @@ import ( "strings" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/libpod/logs" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/libpod/logs" "github.com/coreos/go-systemd/v22/journal" "github.com/coreos/go-systemd/v22/sdjournal" "github.com/pkg/errors" @@ -37,13 +37,21 @@ func (c *Container) initializeJournal(ctx context.Context) error { m := make(map[string]string) m["SYSLOG_IDENTIFIER"] = "podman" m["PODMAN_ID"] = c.ID() - m["CONTAINER_ID_FULL"] = c.ID() history := events.History m["PODMAN_EVENT"] = history.String() + container := events.Container + m["PODMAN_TYPE"] = container.String() + m["PODMAN_TIME"] = time.Now().Format(time.RFC3339Nano) return journal.Send("", journal.PriInfo, m) } -func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine) error { +func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error { + // We need the container's events in the same journal to guarantee + // consistency, see #10323. + if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" { + return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger) + } + journal, err := sdjournal.NewJournal() if err != nil { return err @@ -89,10 +97,15 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption // exponential backoff. var cursor string var cursorError error + var containerCouldBeLogging bool for i := 1; i <= 3; i++ { cursor, cursorError = journal.GetCursor() + hundreds := 1 + for j := 1; j < i; j++ { + hundreds *= 2 + } if cursorError != nil { - time.Sleep(time.Duration(i*100) * time.Millisecond) + time.Sleep(time.Duration(hundreds*100) * time.Millisecond) continue } break @@ -101,12 +114,6 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption return errors.Wrap(cursorError, "initial journal cursor") } - // We need the container's events in the same journal to guarantee - // consistency, see #10323. 
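The journal-cursor retry loop in the hunk above switches from linear to exponential backoff: the `hundreds` accumulator computes 2^(i-1), so attempt i sleeps 2^(i-1) * 100ms instead of the previous i * 100ms. An equivalent, slightly more direct form using a shift (sketch only):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Attempt i sleeps 2^(i-1) * 100ms: 100ms, 200ms, 400ms.
	for i := 1; i <= 3; i++ {
		delay := time.Duration(100*(1<<(i-1))) * time.Millisecond
		fmt.Printf("attempt %d: backoff %v\n", i, delay)
	}
}
```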
- if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" { - return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger) - } - options.WaitGroup.Add(1) go func() { defer func() { @@ -117,7 +124,24 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption }() tailQueue := []*logs.LogLine{} // needed for options.Tail - doTail := options.Tail > 0 + doTail := options.Tail >= 0 + doTailFunc := func() { + // Flush *once* we hit the end of the journal. + startIndex := int64(len(tailQueue)) + outputLines := int64(0) + for startIndex > 0 && outputLines < options.Tail { + startIndex-- + for startIndex > 0 && tailQueue[startIndex].Partial() { + startIndex-- + } + outputLines++ + } + for i := startIndex; i < int64(len(tailQueue)); i++ { + logChannel <- tailQueue[i] + } + tailQueue = nil + doTail = false + } lastReadCursor := "" for { select { @@ -148,19 +172,10 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption // Hit the end of the journal (so far?). if cursor == lastReadCursor { if doTail { - // Flush *once* we hit the end of the journal. - startIndex := int64(len(tailQueue)-1) - options.Tail - if startIndex < 0 { - startIndex = 0 - } - for i := startIndex; i < int64(len(tailQueue)); i++ { - logChannel <- tailQueue[i] - } - tailQueue = nil - doTail = false + doTailFunc() } // Unless we follow, quit. - if !options.Follow { + if !options.Follow || !containerCouldBeLogging { return } // Sleep until something's happening on the journal. @@ -189,8 +204,11 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption logrus.Errorf("Failed to translate event: %v", err) return } - if status == events.Exited { - return + switch status { + case events.History, events.Init, events.Start, events.Restart: + containerCouldBeLogging = true + case events.Exited: + containerCouldBeLogging = false } continue } @@ -205,15 +223,19 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption } if formatError != nil { - logrus.Errorf("Failed to parse journald log entry: %v", err) + logrus.Errorf("Failed to parse journald log entry: %v", formatError) return } logLine, err := logs.NewJournaldLogLine(message, options.Multi) + logLine.ColorID = colorID if err != nil { logrus.Errorf("Failed parse log line: %v", err) return } + if options.UseName { + logLine.CName = c.Name() + } if doTail { tailQueue = append(tailQueue, logLine) continue diff --git a/vendor/github.com/containers/podman/v3/libpod/container_log_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/container_log_unsupported.go similarity index 66% rename from vendor/github.com/containers/podman/v3/libpod/container_log_unsupported.go rename to vendor/github.com/containers/podman/v4/libpod/container_log_unsupported.go index a551df94261..c84a578ccfe 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_log_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_log_unsupported.go @@ -1,16 +1,17 @@ -//+build !linux !systemd +//go:build !linux || !systemd +// +build !linux !systemd package libpod import ( "context" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/logs" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/logs" "github.com/pkg/errors" ) -func (c *Container) readFromJournal(_ context.Context, _ 
*logs.LogOptions, _ chan *logs.LogLine) error { +func (c *Container) readFromJournal(_ context.Context, _ *logs.LogOptions, _ chan *logs.LogLine, colorID int64) error { return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux") } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_path_resolution.go b/vendor/github.com/containers/podman/v4/libpod/container_path_resolution.go similarity index 98% rename from vendor/github.com/containers/podman/v3/libpod/container_path_resolution.go rename to vendor/github.com/containers/podman/v4/libpod/container_path_resolution.go index bb2ef1a73ff..80a3749f5e2 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_path_resolution.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_path_resolution.go @@ -1,4 +1,3 @@ -// +linux package libpod import ( @@ -161,7 +160,7 @@ func isPathOnBindMount(c *Container, containerPath string) bool { if cleanedContainerPath == filepath.Clean(m.Destination) { return true } - for dest := m.Destination; dest != "/"; dest = filepath.Dir(dest) { + for dest := m.Destination; dest != "/" && dest != "."; dest = filepath.Dir(dest) { if cleanedContainerPath == dest { return true } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_stat_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_stat_linux.go similarity index 88% rename from vendor/github.com/containers/podman/v3/libpod/container_stat_linux.go rename to vendor/github.com/containers/podman/v4/libpod/container_stat_linux.go index 0b4d9e2df1b..bbe3edbb311 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_stat_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_stat_linux.go @@ -1,28 +1,28 @@ +//go:build linux // +build linux package libpod import ( - "context" "os" "path/filepath" "strings" "github.com/containers/buildah/copier" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/copy" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/copy" "github.com/pkg/errors" ) // statInsideMount stats the specified path *inside* the container's mount and PID // namespace. It returns the file info along with the resolved root ("/") and // the resolved path (relative to the root). -func (c *Container) statInsideMount(ctx context.Context, containerPath string) (*copier.StatForItem, string, string, error) { +func (c *Container) statInsideMount(containerPath string) (*copier.StatForItem, string, string, error) { resolvedRoot := "/" resolvedPath := c.pathAbs(containerPath) var statInfo *copier.StatForItem - err := c.joinMountAndExec(ctx, + err := c.joinMountAndExec( func() error { var statErr error statInfo, statErr = secureStat(resolvedRoot, resolvedPath) @@ -37,7 +37,7 @@ func (c *Container) statInsideMount(ctx context.Context, containerPath string) ( // along with the resolved root and the resolved path. Both paths are absolute // to the host's root. Note that the paths may resolved outside the // container's mount point (e.g., to a volume or bind mount). -func (c *Container) statOnHost(ctx context.Context, mountPoint string, containerPath string) (*copier.StatForItem, string, string, error) { +func (c *Container) statOnHost(mountPoint string, containerPath string) (*copier.StatForItem, string, string, error) { // Now resolve the container's path. It may hit a volume, it may hit a // bind mount, it may be relative. 
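The `isPathOnBindMount` fix above (adding `dest != "."` to the loop condition) guards against a subtle `filepath.Dir` property: walking parents of an absolute path bottoms out at "/", but a relative path bottoms out at ".", which the old condition never reached, so the loop could spin forever. Demonstration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Absolute paths terminate at "/" ...
	fmt.Println(filepath.Dir("/var/lib"), filepath.Dir("/var")) // /var /

	// ... but relative paths terminate at ".", which `dest != "/"`
	// alone never reaches.
	fmt.Println(filepath.Dir("foo/bar"), filepath.Dir("foo")) // foo .
	fmt.Println(filepath.Dir("."))                            // .
}
```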
resolvedRoot, resolvedPath, err := c.resolvePath(mountPoint, containerPath) @@ -49,7 +49,7 @@ func (c *Container) statOnHost(ctx context.Context, mountPoint string, container return statInfo, resolvedRoot, resolvedPath, err } -func (c *Container) stat(ctx context.Context, containerMountPoint string, containerPath string) (*define.FileInfo, string, string, error) { +func (c *Container) stat(containerMountPoint string, containerPath string) (*define.FileInfo, string, string, error) { var ( resolvedRoot string resolvedPath string @@ -74,11 +74,11 @@ func (c *Container) stat(ctx context.Context, containerMountPoint string, contai if c.state.State == define.ContainerStateRunning { // If the container is running, we need to join it's mount namespace // and stat there. - statInfo, resolvedRoot, resolvedPath, statErr = c.statInsideMount(ctx, containerPath) + statInfo, resolvedRoot, resolvedPath, statErr = c.statInsideMount(containerPath) } else { // If the container is NOT running, we need to resolve the path // on the host. - statInfo, resolvedRoot, resolvedPath, statErr = c.statOnHost(ctx, containerMountPoint, containerPath) + statInfo, resolvedRoot, resolvedPath, statErr = c.statOnHost(containerMountPoint, containerPath) } if statErr != nil { @@ -94,15 +94,16 @@ func (c *Container) stat(ctx context.Context, containerMountPoint string, contai } } - if statInfo.IsSymlink { + switch { + case statInfo.IsSymlink: // Symlinks are already evaluated and always relative to the // container's mount point. absContainerPath = statInfo.ImmediateTarget - } else if strings.HasPrefix(resolvedPath, containerMountPoint) { + case strings.HasPrefix(resolvedPath, containerMountPoint): // If the path is on the container's mount point, strip it off. absContainerPath = strings.TrimPrefix(resolvedPath, containerMountPoint) absContainerPath = filepath.Join("/", absContainerPath) - } else { + default: // No symlink and not on the container's mount point, so let's // move it back to the original input. It must have evaluated // to a volume or bind mount but we cannot return host paths. diff --git a/vendor/github.com/containers/podman/v3/libpod/container_top_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go similarity index 88% rename from vendor/github.com/containers/podman/v3/libpod/container_top_linux.go rename to vendor/github.com/containers/podman/v4/libpod/container_top_linux.go index 0d4cba85ede..9b3dbc873cb 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_top_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go @@ -1,16 +1,19 @@ +//go:build linux // +build linux package libpod import ( "bufio" + "fmt" "os" "strconv" "strings" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/psgo" + "github.com/google/shlex" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -51,7 +54,21 @@ func (c *Container) Top(descriptors []string) ([]string, error) { return nil, psgoErr } - output, err = c.execPS(descriptors) + // Note that the descriptors to ps(1) must be shlexed (see #12452). 
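To make the shlexing in the next hunk lines concrete: `shlex.Split` from github.com/google/shlex turns one user-supplied descriptor string into the argument vector handed to ps(1), which is what #12452 required. The descriptor value below is made up:

```go
package main

import (
	"fmt"

	"github.com/google/shlex"
)

func main() {
	// A single descriptor argument such as "aux --sort=-rss" must be
	// split shell-style before being passed to ps(1).
	fields, err := shlex.Split("aux --sort=-rss")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", fields) // ["aux" "--sort=-rss"]
}
```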
+ psDescriptors := []string{} + for _, d := range descriptors { + shSplit, err := shlex.Split(d) + if err != nil { + return nil, fmt.Errorf("parsing ps args: %v", err) + } + for _, s := range shSplit { + if s != "" { + psDescriptors = append(psDescriptors, s) + } + } + } + + output, err = c.execPS(psDescriptors) if err != nil { return nil, errors.Wrapf(err, "error executing ps(1) in the container") } diff --git a/vendor/github.com/containers/podman/v3/libpod/container_validate.go b/vendor/github.com/containers/podman/v4/libpod/container_validate.go similarity index 89% rename from vendor/github.com/containers/podman/v3/libpod/container_validate.go rename to vendor/github.com/containers/podman/v4/libpod/container_validate.go index 91ebe93fbaf..c6c9a4c6d70 100644 --- a/vendor/github.com/containers/podman/v3/libpod/container_validate.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_validate.go @@ -1,7 +1,7 @@ package libpod import ( - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -57,13 +57,13 @@ func (c *Container) validate() error { if ns.Type == spec.PIDNamespace { foundPid = true if ns.Path != "" { - return errors.Wrapf(define.ErrInvalidArg, "containers not creating CGroups must create a private PID namespace - cannot use another") + return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace - cannot use another") } break } } if !foundPid { - return errors.Wrapf(define.ErrInvalidArg, "containers not creating CGroups must create a private PID namespace") + return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace") } } @@ -74,7 +74,7 @@ func (c *Container) validate() error { // Cannot set static IP or MAC if joining >1 CNI network. if len(c.config.Networks) > 1 && (c.config.StaticIP != nil || c.config.StaticMAC != nil) { - return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if joining more than one CNI network") + return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if joining more than one network") } // Using image resolv.conf conflicts with various DNS settings. @@ -115,17 +115,6 @@ func (c *Container) validate() error { destinations[vol.Dest] = true } - // Check that networks and network aliases match up. - ctrNets := make(map[string]bool) - for _, net := range c.config.Networks { - ctrNets[net] = true - } - for net := range c.config.NetworkAliases { - if _, ok := ctrNets[net]; !ok { - return errors.Wrapf(define.ErrNoSuchNetwork, "container tried to set network aliases for network %s but is not connected to the network", net) - } - } - // If User in the OCI spec is set, require that c.config.User is set for // security reasons (a lot of our code relies on c.config.User). if c.config.User == "" && (c.config.Spec.Process.User.UID != 0 || c.config.Spec.Process.User.GID != 0) { diff --git a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go new file mode 100644 index 00000000000..8f52799812b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go @@ -0,0 +1,152 @@ +package define + +const ( + // InspectAnnotationCIDFile is used by Inspect to determine if a + // container ID file was created for the container. 
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationCIDFile = "io.podman.annotations.cid-file"
+	// InspectAnnotationAutoremove is used by Inspect to determine if a
+	// container will be automatically removed on exit.
+	// If an annotation with this key is found in the OCI spec and is one of
+	// the two supported boolean values (InspectResponseTrue and
+	// InspectResponseFalse) it will be used in the output of Inspect().
+	InspectAnnotationAutoremove = "io.podman.annotations.autoremove"
+	// InspectAnnotationVolumesFrom is used by Inspect to identify
+	// containers whose volumes are being used by this container.
+	// It is expected to be a comma-separated list of container names and/or
+	// IDs.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from"
+	// InspectAnnotationPrivileged is used by Inspect to identify containers
+	// which are privileged (i.e., running with elevated privileges).
+	// It is expected to be a boolean, populated by one of
+	// InspectResponseTrue or InspectResponseFalse.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationPrivileged = "io.podman.annotations.privileged"
+	// InspectAnnotationPublishAll is used by Inspect to identify containers
+	// which have all the ports from their image published.
+	// It is expected to be a boolean, populated by one of
+	// InspectResponseTrue or InspectResponseFalse.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationPublishAll = "io.podman.annotations.publish-all"
+	// InspectAnnotationInit is used by Inspect to identify containers that
+	// mount an init binary in.
+	// It is expected to be a boolean, populated by one of
+	// InspectResponseTrue or InspectResponseFalse.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationInit = "io.podman.annotations.init"
+	// InspectAnnotationLabel is used by Inspect to identify containers with
+	// special SELinux-related settings. It is used to populate the output
+	// of the SecurityOpt setting.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationLabel = "io.podman.annotations.label"
+	// InspectAnnotationSeccomp is used by Inspect to identify containers
+	// with special Seccomp-related settings. It is used to populate the
+	// output of the SecurityOpt setting in Inspect.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationSeccomp = "io.podman.annotations.seccomp"
+	// InspectAnnotationApparmor is used by Inspect to identify containers
+	// with special Apparmor-related settings. It is used to populate the
+	// output of the SecurityOpt setting.
+	// If an annotation with this key is found in the OCI spec, it will be
+	// used in the output of Inspect().
+	InspectAnnotationApparmor = "io.podman.annotations.apparmor"
+
+	// InspectResponseTrue is a boolean True response for an inspect
+	// annotation.
+	InspectResponseTrue = "TRUE"
+	// InspectResponseFalse is a boolean False response for an inspect
+	// annotation.
+ InspectResponseFalse = "FALSE" + + // CheckpointAnnotationName is used by Container Checkpoint when creating a + // checkpoint image to specify the original human-readable name for the + // container. + CheckpointAnnotationName = "io.podman.annotations.checkpoint.name" + + // CheckpointAnnotationRawImageName is used by Container Checkpoint when + // creating a checkpoint image to specify the original unprocessed name of + // the image used to create the container (as specified by the user). + CheckpointAnnotationRawImageName = "io.podman.annotations.checkpoint.rawImageName" + + // CheckpointAnnotationRootfsImageID is used by Container Checkpoint when + // creating a checkpoint image to specify the original ID of the image used + // to create the container. + CheckpointAnnotationRootfsImageID = "io.podman.annotations.checkpoint.rootfsImageID" + + // CheckpointAnnotationRootfsImageName is used by Container Checkpoint when + // creating a checkpoint image to specify the original image name used to + // create the container. + CheckpointAnnotationRootfsImageName = "io.podman.annotations.checkpoint.rootfsImageName" + + // CheckpointAnnotationPodmanVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of Podman used on the + // host where the checkpoint was created. + CheckpointAnnotationPodmanVersion = "io.podman.annotations.checkpoint.podman.version" + + // CheckpointAnnotationCriuVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of CRIU used on the + // host where the checkpoint was created. + CheckpointAnnotationCriuVersion = "io.podman.annotations.checkpoint.criu.version" + + // CheckpointAnnotationRuntimeName is used by Container Checkpoint when + // creating a checkpoint image to specify the runtime used on the host where + // the checkpoint was created. + CheckpointAnnotationRuntimeName = "io.podman.annotations.checkpoint.runtime.name" + + // CheckpointAnnotationRuntimeVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of runtime used on the + // host where the checkpoint was created. + CheckpointAnnotationRuntimeVersion = "io.podman.annotations.checkpoint.runtime.version" + + // CheckpointAnnotationConmonVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the version of conmon used on + // the host where the checkpoint was created. + CheckpointAnnotationConmonVersion = "io.podman.annotations.checkpoint.conmon.version" + + // CheckpointAnnotationHostArch is used by Container Checkpoint when + // creating a checkpoint image to specify the CPU architecture of the host + // on which the checkpoint was created. + CheckpointAnnotationHostArch = "io.podman.annotations.checkpoint.host.arch" + + // CheckpointAnnotationHostKernel is used by Container Checkpoint when + // creating a checkpoint image to specify the kernel version used by the + // host where the checkpoint was created. + CheckpointAnnotationHostKernel = "io.podman.annotations.checkpoint.host.kernel" + + // CheckpointAnnotationCgroupVersion is used by Container Checkpoint when + // creating a checkpoint image to specify the cgroup version used by the + // host where the checkpoint was created. 
+ CheckpointAnnotationCgroupVersion = "io.podman.annotations.checkpoint.cgroups.version" + + // CheckpointAnnotationDistributionVersion is used by Container Checkpoint + // when creating a checkpoint image to specify the version of host + // distribution on which the checkpoint was created. + CheckpointAnnotationDistributionVersion = "io.podman.annotations.checkpoint.distribution.version" + + // CheckpointAnnotationDistributionName is used by Container Checkpoint when + // creating a checkpoint image to specify the name of host distribution on + // which the checkpoint was created. + CheckpointAnnotationDistributionName = "io.podman.annotations.checkpoint.distribution.name" + // MaxKubeAnnotation is the max length of annotations allowed by Kubernetes. + MaxKubeAnnotation = 63 +) + +// IsReservedAnnotation returns true if the specified value corresponds to an +// already reserved annotation that Podman sets during container creation. +func IsReservedAnnotation(value string) bool { + switch value { + case InspectAnnotationCIDFile, InspectAnnotationAutoremove, InspectAnnotationVolumesFrom, InspectAnnotationPrivileged, InspectAnnotationPublishAll, InspectAnnotationInit, InspectAnnotationLabel, InspectAnnotationSeccomp, InspectAnnotationApparmor, InspectResponseTrue, InspectResponseFalse: + return true + + default: + return false + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go b/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go new file mode 100644 index 00000000000..536bdde9a3c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go @@ -0,0 +1,32 @@ +package define + +// This contains values reported by CRIU during +// checkpointing or restoring. +// All names are the same as reported by CRIU. 
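The `IsReservedAnnotation` helper above is intended as a guard when merging user-supplied annotations into a container's spec. A self-contained sketch of that usage, with the reserved set inlined (and abbreviated) so the example runs standalone:

```go
package main

import "fmt"

// isReserved stands in for define.IsReservedAnnotation above, inlined
// here with only a few of the reserved keys for brevity.
func isReserved(key string) bool {
	switch key {
	case "io.podman.annotations.cid-file",
		"io.podman.annotations.autoremove",
		"io.podman.annotations.privileged":
		return true
	default:
		return false
	}
}

func main() {
	userAnnotations := map[string]string{
		"io.podman.annotations.privileged": "TRUE", // reserved, must be rejected
		"com.example.team":                 "infra",
	}
	for k, v := range userAnnotations {
		if isReserved(k) {
			fmt.Printf("rejecting reserved annotation %s=%s\n", k, v)
			continue
		}
		fmt.Printf("keeping %s=%s\n", k, v)
	}
}
```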
+type CRIUCheckpointRestoreStatistics struct { + // Checkpoint values + // Time required to freeze/pause/quiesce the processes + FreezingTime uint32 `json:"freezing_time,omitempty"` + // Time the processes are actually not running during checkpointing + FrozenTime uint32 `json:"frozen_time,omitempty"` + // Time required to extract memory pages from the processes + MemdumpTime uint32 `json:"memdump_time,omitempty"` + // Time required to write memory pages to disk + MemwriteTime uint32 `json:"memwrite_time,omitempty"` + // Number of memory pages CRIU analyzed + PagesScanned uint64 `json:"pages_scanned,omitempty"` + // Number of memory pages written + PagesWritten uint64 `json:"pages_written,omitempty"` + + // Restore values + // Number of pages compared during restore + PagesCompared uint64 `json:"pages_compared,omitempty"` + // Number of COW pages skipped during restore + PagesSkippedCow uint64 `json:"pages_skipped_cow,omitempty"` + // Time required to fork processes + ForkingTime uint32 `json:"forking_time,omitempty"` + // Time required to restore + RestoreTime uint32 `json:"restore_time,omitempty"` + // Number of memory pages restored + PagesRestored uint64 `json:"pages_restored,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v3/libpod/define/config.go b/vendor/github.com/containers/podman/v4/libpod/define/config.go similarity index 94% rename from vendor/github.com/containers/podman/v3/libpod/define/config.go rename to vendor/github.com/containers/podman/v4/libpod/define/config.go index a5cf07afc52..0181bd31ce0 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/config.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/config.go @@ -5,7 +5,7 @@ import ( "io" "regexp" - "github.com/pkg/errors" + "github.com/containers/common/libnetwork/types" ) var ( @@ -17,9 +17,9 @@ var ( // NameRegex is a regular expression to validate container/pod names. // This must NOT be changed from outside of Libpod. It should be a // constant, but Go won't let us do that. - NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + NameRegex = types.NameRegex // RegexError is thrown in presence of an invalid container/pod name. - RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") + RegexError = types.RegexError // UmaskRegex is a regular expression to validate Umask. 
UmaskRegex = regexp.MustCompile(`^[0-7]{1,4}$`) ) diff --git a/vendor/github.com/containers/podman/v3/libpod/define/container.go b/vendor/github.com/containers/podman/v4/libpod/define/container.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/define/container.go rename to vendor/github.com/containers/podman/v4/libpod/define/container.go diff --git a/vendor/github.com/containers/podman/v3/libpod/define/container_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go similarity index 92% rename from vendor/github.com/containers/podman/v3/libpod/define/container_inspect.go rename to vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go index 7decb18a8e1..6cdffb8b7a6 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/container_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go @@ -6,6 +6,11 @@ import ( "github.com/containers/image/v5/manifest" ) +type InspectIDMappings struct { + UIDMap []string `json:"UidMap"` + GIDMap []string `json:"GidMap"` +} + // InspectContainerConfig holds further data about how a container was initially // configured. type InspectContainerConfig struct { @@ -68,6 +73,12 @@ type InspectContainerConfig struct { Timeout uint `json:"Timeout"` // StopTimeout is time before container is stopped when calling stop StopTimeout uint `json:"StopTimeout"` + // Passwd determines whether or not podman can add entries to /etc/passwd and /etc/group + Passwd *bool `json:"Passwd,omitempty"` + // ChrootDirs is an additional set of directories that need to be + // treated as root directories. Standard bind mounts will be mounted + // into paths relative to these directories. + ChrootDirs []string `json:"ChrootDirs,omitempty"` } // InspectRestartPolicy holds information about the container's restart policy. @@ -89,7 +100,7 @@ type InspectRestartPolicy struct { // InspectLogConfig holds information about a container's configured log driver type InspectLogConfig struct { Type string `json:"Type"` - Config map[string]string `json:"Config"` //idk type, TODO + Config map[string]string `json:"Config"` // Path specifies a path to the log file Path string `json:"Path"` // Tag specifies a custom log tag for the container @@ -189,21 +200,28 @@ type InspectMount struct { // Docker, but here we see more fields that are unused (nonsensical in the // context of Libpod). 
type InspectContainerState struct { - OciVersion string `json:"OciVersion"` - Status string `json:"Status"` - Running bool `json:"Running"` - Paused bool `json:"Paused"` - Restarting bool `json:"Restarting"` // TODO - OOMKilled bool `json:"OOMKilled"` - Dead bool `json:"Dead"` - Pid int `json:"Pid"` - ConmonPid int `json:"ConmonPid,omitempty"` - ExitCode int32 `json:"ExitCode"` - Error string `json:"Error"` // TODO - StartedAt time.Time `json:"StartedAt"` - FinishedAt time.Time `json:"FinishedAt"` - Health HealthCheckResults `json:"Health,omitempty"` - Checkpointed bool `json:"Checkpointed,omitempty"` + OciVersion string `json:"OciVersion"` + Status string `json:"Status"` + Running bool `json:"Running"` + Paused bool `json:"Paused"` + Restarting bool `json:"Restarting"` // TODO + OOMKilled bool `json:"OOMKilled"` + Dead bool `json:"Dead"` + Pid int `json:"Pid"` + ConmonPid int `json:"ConmonPid,omitempty"` + ExitCode int32 `json:"ExitCode"` + Error string `json:"Error"` // TODO + StartedAt time.Time `json:"StartedAt"` + FinishedAt time.Time `json:"FinishedAt"` + Health HealthCheckResults `json:"Health,omitempty"` + Checkpointed bool `json:"Checkpointed,omitempty"` + CgroupPath string `json:"CgroupPath,omitempty"` + CheckpointedAt time.Time `json:"CheckpointedAt,omitempty"` + RestoredAt time.Time `json:"RestoredAt,omitempty"` + CheckpointLog string `json:"CheckpointLog,omitempty"` + CheckpointPath string `json:"CheckpointPath,omitempty"` + RestoreLog string `json:"RestoreLog,omitempty"` + Restored bool `json:"Restored,omitempty"` } // Healthcheck returns the HealthCheckResults. This is used for old podman compat @@ -336,9 +354,9 @@ type InspectContainerHostConfig struct { // populated. // TODO. Cgroup string `json:"Cgroup"` - // Cgroups contains the container's CGroup mode. - // Allowed values are "default" (container is creating CGroups) and - // "disabled" (container is not creating CGroups). + // Cgroups contains the container's Cgroup mode. + // Allowed values are "default" (container is creating Cgroups) and + // "disabled" (container is not creating Cgroups). // This is Libpod-specific and not included in `docker inspect`. Cgroups string `json:"Cgroups"` // Links is unused, and provided purely for Docker compatibility. @@ -392,7 +410,10 @@ type InspectContainerHostConfig struct { // TODO Rootless has an additional 'keep-id' option, presently not // reflected here. UsernsMode string `json:"UsernsMode"` + // IDMappings is the UIDMapping and GIDMapping used within the container + IDMappings *InspectIDMappings `json:"IDMappings,omitempty"` // ShmSize is the size of the container's SHM device. + ShmSize int64 `json:"ShmSize"` // Runtime is provided purely for Docker compatibility. // It is set unconditionally to "oci" as Podman does not presently @@ -408,7 +429,7 @@ type InspectContainerHostConfig struct { Isolation string `json:"Isolation"` // CpuShares indicates the CPU resources allocated to the container. // It is a relative weight in the scheduler for assigning CPU time - // versus other CGroups. + // versus other Cgroups. CpuShares uint64 `json:"CpuShares"` // Memory indicates the memory resources allocated to the container. // This is the limit (in bytes) of RAM the container may use. @@ -425,12 +446,12 @@ type InspectContainerHostConfig struct { // 100000, we will set both CpuQuota, CpuPeriod, and NanoCpus. If // CpuQuota is not the default, we will not set NanoCpus. NanoCpus int64 `json:"NanoCpus"` - // CgroupParent is the CGroup parent of the container. 
+ // CgroupParent is the Cgroup parent of the container. // Only set if not default. CgroupParent string `json:"CgroupParent"` // BlkioWeight indicates the I/O resources allocated to the container. // It is a relative weight in the scheduler for assigning I/O time - // versus other CGroups. + // versus other Cgroups. BlkioWeight uint16 `json:"BlkioWeight"` // BlkioWeightDevice is an array of I/O resource priorities for // individual device nodes. @@ -542,6 +563,12 @@ type InspectContainerHostConfig struct { CgroupConf map[string]string `json:"CgroupConf"` } +// Address represents an IP address. +type Address struct { + Addr string + PrefixLength int +} + // InspectBasicNetworkConfig holds basic configuration information (e.g. IP // addresses, MAC address, subnet masks, etc) that are common for all networks // (both additional and main). @@ -556,7 +583,7 @@ type InspectBasicNetworkConfig struct { IPPrefixLen int `json:"IPPrefixLen"` // SecondaryIPAddresses is a list of extra IP Addresses that the // container has been assigned in this network. - SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty"` + SecondaryIPAddresses []Address `json:"SecondaryIPAddresses,omitempty"` // IPv6Gateway is the IPv6 gateway this network will use. IPv6Gateway string `json:"IPv6Gateway"` // GlobalIPv6Address is the global-scope IPv6 Address for this network. @@ -565,7 +592,7 @@ type InspectBasicNetworkConfig struct { GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen"` // SecondaryIPv6Addresses is a list of extra IPv6 Addresses that the // container has been assigned in this network. - SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty"` + SecondaryIPv6Addresses []Address `json:"SecondaryIPv6Addresses,omitempty"` // MacAddress is the MAC address for the interface in this network. MacAddress string `json:"MacAddress"` // AdditionalMacAddresses is a set of additional MAC Addresses beyond @@ -653,8 +680,7 @@ type InspectContainerData struct { SizeRootFs int64 `json:"SizeRootFs,omitempty"` Mounts []InspectMount `json:"Mounts"` Dependencies []string `json:"Dependencies"` - NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO - ExitCommand []string `json:"ExitCommand"` + NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` Namespace string `json:"Namespace"` IsInfra bool `json:"IsInfra"` Config *InspectContainerConfig `json:"Config"` diff --git a/vendor/github.com/containers/podman/v3/libpod/define/containerstate.go b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go similarity index 88% rename from vendor/github.com/containers/podman/v3/libpod/define/containerstate.go rename to vendor/github.com/containers/podman/v4/libpod/define/containerstate.go index fc272beaa4d..9ad3aec08ed 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/containerstate.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go @@ -37,16 +37,23 @@ const ( ContainerStateStopping ContainerStatus = iota ) -// ContainerStatus returns a string representation for users -// of a container state +// ContainerStatus returns a string representation for users of a container +// state. All results should match Docker's versions (from `docker ps`) as +// closely as possible, given the different set of states we support. 
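One inspect-facing change worth noting above: `SecondaryIPAddresses` and `SecondaryIPv6Addresses` move from bare strings to the new `Address` struct, so the prefix length is no longer lost. Its JSON shape (address made up):

```go
package main

import (
	"encoding/json"
	"os"
)

// Address mirrors the new inspect type above: an IP plus its prefix
// length, replacing the old bare-string form.
type Address struct {
	Addr         string
	PrefixLength int
}

func main() {
	secondary := []Address{{Addr: "10.88.0.3", PrefixLength: 16}}
	// Prints: [{"Addr":"10.88.0.3","PrefixLength":16}]
	_ = json.NewEncoder(os.Stdout).Encode(secondary)
}
```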
func (t ContainerStatus) String() string { switch t { case ContainerStateUnknown: return "unknown" case ContainerStateConfigured: - return "configured" - case ContainerStateCreated: + // The naming here is confusing, but it's necessary for Docker + // compatibility - their Created state is our Configured state. return "created" + case ContainerStateCreated: + // Docker does not have an equivalent to this state, so give it + // a clear name. Most of the time this is a purely transitory + // state between Configured and Running so we don't expect to + // see it much anyways. + return "initialized" case ContainerStateRunning: return "running" case ContainerStateStopped: @@ -131,7 +138,6 @@ type ContainerStats struct { CPU float64 CPUNano uint64 CPUSystemNano uint64 - DataPoints int64 SystemNano uint64 MemUsage uint64 MemLimit uint64 diff --git a/vendor/github.com/containers/podman/v3/libpod/define/diff.go b/vendor/github.com/containers/podman/v4/libpod/define/diff.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/define/diff.go rename to vendor/github.com/containers/podman/v4/libpod/define/diff.go diff --git a/vendor/github.com/containers/podman/v3/libpod/define/errors.go b/vendor/github.com/containers/podman/v4/libpod/define/errors.go similarity index 98% rename from vendor/github.com/containers/podman/v3/libpod/define/errors.go rename to vendor/github.com/containers/podman/v4/libpod/define/errors.go index 9fd210eed3d..f5a7c73e5dd 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/errors.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/errors.go @@ -3,6 +3,8 @@ package define import ( "errors" "fmt" + + "github.com/containers/common/libnetwork/types" ) var ( @@ -16,7 +18,7 @@ var ( ErrNoSuchVolume = errors.New("no such volume") // ErrNoSuchNetwork indicates the requested network does not exist - ErrNoSuchNetwork = errors.New("network not found") + ErrNoSuchNetwork = types.ErrNoSuchNetwork // ErrNoSuchExecSession indicates that the requested exec session does // not exist. @@ -48,7 +50,7 @@ var ( ErrExecSessionExists = errors.New("exec session already exists") // ErrNetworkExists indicates that a network with the given name already // exists. - ErrNetworkExists = errors.New("network already exists") + ErrNetworkExists = types.ErrNetworkExists // ErrCtrStateInvalid indicates a container is in an improper state for // the requested operation @@ -73,7 +75,7 @@ var ( ErrVolumeFinalized = errors.New("volume has been finalized") // ErrInvalidArg indicates that an invalid argument was passed - ErrInvalidArg = errors.New("invalid argument") + ErrInvalidArg = types.ErrInvalidArg // ErrEmptyID indicates that an empty ID was passed ErrEmptyID = errors.New("name or ID cannot be empty") @@ -94,7 +96,7 @@ var ( ErrWillDeadlock = errors.New("deadlock due to lock mismatch") // ErrNoCgroups indicates that the container does not have its own - // CGroup. + // Cgroup. 
+ // Cgroup.
ErrNoCgroups = errors.New("this container does not have a cgroup") // ErrNoLogs indicates that this container is not creating a log so log // operations cannot be performed on it diff --git a/vendor/github.com/containers/podman/v3/libpod/define/exec_codes.go b/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/define/exec_codes.go rename to vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go diff --git a/vendor/github.com/containers/podman/v3/libpod/define/fileinfo.go b/vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/define/fileinfo.go rename to vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go diff --git a/vendor/github.com/containers/podman/v3/libpod/define/healthchecks.go b/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go similarity index 76% rename from vendor/github.com/containers/podman/v3/libpod/define/healthchecks.go rename to vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go index 4114262b692..bde449d3077 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/healthchecks.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go @@ -34,3 +34,16 @@ const ( // HealthCheckDefined means the healthcheck was found on the container HealthCheckDefined HealthCheckStatus = iota ) + +// Healthcheck defaults. These are used both in the cli as well in +// libpod and were moved from cmd/podman/common +const ( + // DefaultHealthCheckInterval default value + DefaultHealthCheckInterval = "30s" + // DefaultHealthCheckRetries default value + DefaultHealthCheckRetries uint = 3 + // DefaultHealthCheckStartPeriod default value + DefaultHealthCheckStartPeriod = "0s" + // DefaultHealthCheckTimeout default value + DefaultHealthCheckTimeout = "30s" +) diff --git a/vendor/github.com/containers/podman/v3/libpod/define/info.go b/vendor/github.com/containers/podman/v4/libpod/define/info.go similarity index 82% rename from vendor/github.com/containers/podman/v3/libpod/define/info.go rename to vendor/github.com/containers/podman/v4/libpod/define/info.go index 61f2f4c752f..911fa5c03c7 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/info.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/info.go @@ -1,6 +1,8 @@ package define -import "github.com/containers/storage/pkg/idtools" +import ( + "github.com/containers/storage/pkg/idtools" +) // Info is the overall struct that describes the host system // running libpod/podman @@ -12,7 +14,7 @@ type Info struct { Version Version `json:"version"` } -//HostInfo describes the libpod host +// HostInfo describes the libpod host type SecurityInfo struct { AppArmorEnabled bool `json:"apparmorEnabled"` DefaultCapabilities string `json:"capabilities"` @@ -27,10 +29,11 @@ type HostInfo struct { Arch string `json:"arch"` BuildahVersion string `json:"buildahVersion"` CgroupManager string `json:"cgroupManager"` - CGroupsVersion string `json:"cgroupVersion"` + CgroupsVersion string `json:"cgroupVersion"` CgroupControllers []string `json:"cgroupControllers"` Conmon *ConmonInfo `json:"conmon"` CPUs int `json:"cpus"` + CPUUtilization *CPUUsage `json:"cpuUtilization"` Distribution DistributionInfo `json:"distribution"` EventLogger string `json:"eventLogger"` Hostname string `json:"hostname"` @@ -39,6 +42,7 @@ type HostInfo struct { LogDriver string `json:"logDriver"` 
MemFree int64 `json:"memFree"` MemTotal int64 `json:"memTotal"` + NetworkBackend string `json:"networkBackend"` OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"` OS string `json:"os"` // RemoteSocket returns the UNIX domain socket the Podman service is listening on @@ -107,11 +111,15 @@ type StoreInfo struct { GraphDriverName string `json:"graphDriverName"` GraphOptions map[string]interface{} `json:"graphOptions"` GraphRoot string `json:"graphRoot"` - GraphStatus map[string]string `json:"graphStatus"` - ImageCopyTmpDir string `json:"imageCopyTmpDir"` - ImageStore ImageStore `json:"imageStore"` - RunRoot string `json:"runRoot"` - VolumePath string `json:"volumePath"` + // GraphRootAllocated is how much space the graphroot has in bytes + GraphRootAllocated uint64 `json:"graphRootAllocated"` + // GraphRootUsed is how much of graphroot is used in bytes + GraphRootUsed uint64 `json:"graphRootUsed"` + GraphStatus map[string]string `json:"graphStatus"` + ImageCopyTmpDir string `json:"imageCopyTmpDir"` + ImageStore ImageStore `json:"imageStore"` + RunRoot string `json:"runRoot"` + VolumePath string `json:"volumePath"` } // ImageStore describes the image store. Right now only the number @@ -136,3 +144,9 @@ type Plugins struct { // FIXME what should we do with Authorization, docker seems to return nothing by default // Authorization []string `json:"authorization"` } + +type CPUUsage struct { + UserPercent float64 `json:"userPercent"` + SystemPercent float64 `json:"systemPercent"` + IdlePercent float64 `json:"idlePercent"` +} diff --git a/vendor/github.com/containers/podman/v3/libpod/define/mount.go b/vendor/github.com/containers/podman/v4/libpod/define/mount.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/define/mount.go rename to vendor/github.com/containers/podman/v4/libpod/define/mount.go diff --git a/vendor/github.com/containers/podman/v3/libpod/define/pod_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go similarity index 94% rename from vendor/github.com/containers/podman/v3/libpod/define/pod_inspect.go rename to vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go index 97e7ffdfbf4..e85a660a17a 100644 --- a/vendor/github.com/containers/podman/v3/libpod/define/pod_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go @@ -26,12 +26,12 @@ type InspectPodData struct { // Labels is a set of key-value labels that have been applied to the // pod. Labels map[string]string `json:"Labels,omitempty"` - // CreateCgroup is whether this pod will create its own CGroup to group + // CreateCgroup is whether this pod will create its own Cgroup to group // containers under. CreateCgroup bool - // CgroupParent is the parent of the pod's CGroup. + // CgroupParent is the parent of the pod's Cgroup. CgroupParent string `json:"CgroupParent,omitempty"` - // CgroupPath is the path to the pod's CGroup. + // CgroupPath is the path to the pod's Cgroup. CgroupPath string `json:"CgroupPath,omitempty"` // CreateInfra is whether this pod will create an infra container to // share namespaces. 
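For context: the define/info.go hunks above add GraphRootAllocated and GraphRootUsed to StoreInfo (filled from a statfs of the graph root) and a CPUUtilization summary to HostInfo. Below is a minimal consumer-side sketch, assuming a define.Info value such as the one returned by libpod's Info(); the reportUsage helper and the literal values are illustrative only and not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/containers/podman/v4/libpod/define"
)

// reportUsage prints the fields added by this bump: graph-root disk usage
// from StoreInfo and the CPU utilization summary from HostInfo.
func reportUsage(info *define.Info) {
	if st := info.Store; st != nil && st.GraphRootAllocated > 0 {
		pct := float64(st.GraphRootUsed) / float64(st.GraphRootAllocated) * 100
		fmt.Printf("graph root %s: %d of %d bytes used (%.1f%%)\n",
			st.GraphRoot, st.GraphRootUsed, st.GraphRootAllocated, pct)
	}
	if host := info.Host; host != nil && host.CPUUtilization != nil {
		cpu := host.CPUUtilization
		fmt.Printf("cpu: %.2f%% user, %.2f%% system, %.2f%% idle\n",
			cpu.UserPercent, cpu.SystemPercent, cpu.IdlePercent)
	}
}

func main() {
	// Toy values for illustration; in real use the Info value would come
	// from a libpod Runtime's Info() call.
	reportUsage(&define.Info{
		Host: &define.HostInfo{
			CPUUtilization: &define.CPUUsage{UserPercent: 2.5, SystemPercent: 1.0, IdlePercent: 96.5},
		},
		Store: &define.StoreInfo{
			GraphRoot:          "/var/lib/containers/storage",
			GraphRootAllocated: 100 << 30,
			GraphRootUsed:      25 << 30,
		},
	})
}
```

The same data surfaces in the JSON output of `podman info` under graphRootAllocated, graphRootUsed, and cpuUtilization.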
@@ -65,6 +65,8 @@ type InspectPodData struct {
 BlkioDeviceReadBps []InspectBlkioThrottleDevice `json:"device_read_bps,omitempty"`
 // VolumesFrom contains the containers that the pod inherits mounts from
 VolumesFrom []string `json:"volumes_from,omitempty"`
+ // SecurityOpt contains the specified security labels and related SELinux information
+ SecurityOpts []string `json:"security_opt,omitempty"`
 }
 
 // InspectPodInfraConfig contains the configuration of the pod's infra
diff --git a/vendor/github.com/containers/podman/v3/libpod/define/podstate.go b/vendor/github.com/containers/podman/v4/libpod/define/podstate.go
similarity index 100%
rename from vendor/github.com/containers/podman/v3/libpod/define/podstate.go
rename to vendor/github.com/containers/podman/v4/libpod/define/podstate.go
diff --git a/vendor/github.com/containers/podman/v3/libpod/define/runtime.go b/vendor/github.com/containers/podman/v4/libpod/define/runtime.go
similarity index 100%
rename from vendor/github.com/containers/podman/v3/libpod/define/runtime.go
rename to vendor/github.com/containers/podman/v4/libpod/define/runtime.go
diff --git a/vendor/github.com/containers/podman/v3/libpod/define/terminal.go b/vendor/github.com/containers/podman/v4/libpod/define/terminal.go
similarity index 100%
rename from vendor/github.com/containers/podman/v3/libpod/define/terminal.go
rename to vendor/github.com/containers/podman/v4/libpod/define/terminal.go
diff --git a/vendor/github.com/containers/podman/v3/libpod/define/version.go b/vendor/github.com/containers/podman/v4/libpod/define/version.go
similarity index 92%
rename from vendor/github.com/containers/podman/v3/libpod/define/version.go
rename to vendor/github.com/containers/podman/v4/libpod/define/version.go
index 5249b5d84c4..2c17e6e92a9 100644
--- a/vendor/github.com/containers/podman/v3/libpod/define/version.go
+++ b/vendor/github.com/containers/podman/v4/libpod/define/version.go
@@ -5,7 +5,7 @@ import (
 "strconv"
 "time"
 
- "github.com/containers/podman/v3/version"
+ "github.com/containers/podman/v4/version"
 )
 
 // Overwritten at build time
@@ -27,6 +27,7 @@ type Version struct {
 BuiltTime string
 Built int64
 OsArch string
+ Os string
 }
 
 // GetVersion returns a VersionOutput struct for API and podman
@@ -49,5 +50,6 @@ func GetVersion() (Version, error) {
 BuiltTime: time.Unix(buildTime, 0).Format(time.ANSIC),
 Built: buildTime,
 OsArch: runtime.GOOS + "/" + runtime.GOARCH,
+ Os: runtime.GOOS,
 }, nil
 }
diff --git a/vendor/github.com/containers/podman/v3/libpod/define/volume_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go
similarity index 84%
rename from vendor/github.com/containers/podman/v3/libpod/define/volume_inspect.go
rename to vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go
index 20602ea164f..fac1791763a 100644
--- a/vendor/github.com/containers/podman/v3/libpod/define/volume_inspect.go
+++ b/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go
@@ -48,4 +48,13 @@ type InspectVolumeData struct {
 // volume for a specific container, and will be removed when any
 // container using it is removed.
 Anonymous bool `json:"Anonymous,omitempty"`
+ // MountCount is the number of times this volume has been mounted.
+ MountCount uint `json:"MountCount"`
+ // NeedsCopyUp indicates that the next time the volume is mounted into
+ // a container, the container's contents at the mount point will be
+ // copied up into the volume.
+ NeedsCopyUp bool `json:"NeedsCopyUp,omitempty"`
+ // NeedsChown indicates that the next time the volume is mounted into
+ // a container, the container will chown the volume to the container process
+ // UID/GID.
+ NeedsChown bool `json:"NeedsChown,omitempty"`
 }
diff --git a/vendor/github.com/containers/podman/v3/libpod/diff.go b/vendor/github.com/containers/podman/v4/libpod/diff.go
similarity index 95%
rename from vendor/github.com/containers/podman/v3/libpod/diff.go
rename to vendor/github.com/containers/podman/v4/libpod/diff.go
index 6a50bef32d1..794b26b482d 100644
--- a/vendor/github.com/containers/podman/v3/libpod/diff.go
+++ b/vendor/github.com/containers/podman/v4/libpod/diff.go
@@ -1,8 +1,8 @@
 package libpod
 
 import (
- "github.com/containers/podman/v3/libpod/define"
- "github.com/containers/podman/v3/libpod/layers"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/libpod/layers"
 "github.com/containers/storage/pkg/archive"
 "github.com/pkg/errors"
 )
diff --git a/vendor/github.com/containers/podman/v4/libpod/doc.go b/vendor/github.com/containers/podman/v4/libpod/doc.go
new file mode 100644
index 00000000000..948153181a5
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/libpod/doc.go
@@ -0,0 +1,11 @@
+// The libpod library is not stable and we do not support use cases outside of
+// this repository. The API can change at any time even with patch releases.
+//
+// If you need a stable interface Podman provides an HTTP API which follows semver,
+// please see https://docs.podman.io/en/latest/markdown/podman-system-service.1.html
+// to start the api service and https://docs.podman.io/en/latest/_static/api.html
+// for the API reference.
+//
+// We also provide stable go bindings to talk to the api service from another go
+// program, see the pkg/bindings directory.
+package libpod diff --git a/vendor/github.com/containers/podman/v3/libpod/driver/driver.go b/vendor/github.com/containers/podman/v4/libpod/driver/driver.go similarity index 92% rename from vendor/github.com/containers/podman/v3/libpod/driver/driver.go rename to vendor/github.com/containers/podman/v4/libpod/driver/driver.go index 6fe2cf8ac15..8694685ffcf 100644 --- a/vendor/github.com/containers/podman/v3/libpod/driver/driver.go +++ b/vendor/github.com/containers/podman/v4/libpod/driver/driver.go @@ -1,7 +1,7 @@ package driver import ( - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/containers/storage" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/events.go b/vendor/github.com/containers/podman/v4/libpod/events.go similarity index 96% rename from vendor/github.com/containers/podman/v3/libpod/events.go rename to vendor/github.com/containers/podman/v4/libpod/events.go index 342af02d2c9..39f5786a431 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events.go +++ b/vendor/github.com/containers/podman/v4/libpod/events.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/containers/podman/v3/libpod/events" + "github.com/containers/podman/v4/libpod/events" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -13,8 +13,9 @@ import ( // newEventer returns an eventer that can be used to read/write events func (r *Runtime) newEventer() (events.Eventer, error) { options := events.EventerOptions{ - EventerType: r.config.Engine.EventsLogger, - LogFilePath: r.config.Engine.EventsLogFilePath, + EventerType: r.config.Engine.EventsLogger, + LogFilePath: r.config.Engine.EventsLogFilePath, + LogFileMaxSize: r.config.Engine.EventsLogMaxSize(), } return events.NewEventer(options) } diff --git a/vendor/github.com/containers/podman/v3/libpod/events/config.go b/vendor/github.com/containers/podman/v4/libpod/events/config.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/events/config.go rename to vendor/github.com/containers/podman/v4/libpod/events/config.go index d88d7b6e331..00cdca00705 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/config.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/config.go @@ -17,6 +17,8 @@ const ( Journald EventerType = iota // Null is a no-op events logger. It does not read or write events. Null EventerType = iota + // Memory indicates the event logger will hold events in memory + Memory EventerType = iota ) // Event describes the attributes of a libpod event @@ -55,11 +57,13 @@ type Details struct { // EventerOptions describe options that need to be passed to create // an eventer type EventerOptions struct { - // EventerType describes whether to use journald or a file + // EventerType describes whether to use journald, file or memory EventerType string // LogFilePath is the path to where the log file should reside if using // the file logger LogFilePath string + // LogFileMaxSize is the default limit used for rotating the log file + LogFileMaxSize uint64 } // Eventer is the interface for journald or file event logging @@ -108,6 +112,8 @@ const ( System Type = "system" // Volume - event is related to volumes Volume Type = "volume" + // Machine - event is related to machine VM's + Machine Type = "machine" // Attach ... Attach Status = "attach" @@ -162,6 +168,8 @@ const ( Refresh Status = "refresh" // Remove ... 
Remove Status = "remove" + // Rename indicates that a container was renamed + Rename Status = "rename" // Renumber indicates that lock numbers were reallocated at user // request. Renumber Status = "renumber" @@ -169,6 +177,8 @@ const ( Restart Status = "restart" // Restore ... Restore Status = "restore" + // Rotate indicates that the log file was rotated + Rotate Status = "log-rotation" // Save ... Save Status = "save" // Start ... diff --git a/vendor/github.com/containers/podman/v3/libpod/events/events.go b/vendor/github.com/containers/podman/v4/libpod/events/events.go similarity index 85% rename from vendor/github.com/containers/podman/v3/libpod/events/events.go rename to vendor/github.com/containers/podman/v4/libpod/events/events.go index 16dd6424ef5..04417fd8d19 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/events.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/events.go @@ -3,11 +3,9 @@ package events import ( "encoding/json" "fmt" - "os" "time" "github.com/containers/storage/pkg/stringid" - "github.com/hpcloud/tail" "github.com/pkg/errors" ) @@ -22,6 +20,8 @@ func (et EventerType) String() string { return "file" case Journald: return "journald" + case Memory: + return "memory" case Null: return "none" default: @@ -36,6 +36,8 @@ func IsValidEventer(eventer string) bool { return true case Journald.String(): return true + case Memory.String(): + return true case Null.String(): return true default: @@ -43,7 +45,7 @@ func IsValidEventer(eventer string) bool { } } -// NewEvent creates a event struct and populates with +// NewEvent creates an event struct and populates with // the given status and time. func NewEvent(status Status) Event { return Event{ @@ -65,7 +67,7 @@ func (e *Event) ToJSONString() (string, error) { return string(b), err } -// ToHumanReadable returns human readable event as a formatted string +// ToHumanReadable returns human-readable event as a formatted string func (e *Event) ToHumanReadable(truncate bool) string { var humanFormat string id := e.ID @@ -87,8 +89,12 @@ func (e *Event) ToHumanReadable(truncate bool) string { case Image: humanFormat = fmt.Sprintf("%s %s %s %s %s", e.Time, e.Type, e.Status, id, e.Name) case System: - humanFormat = fmt.Sprintf("%s %s %s", e.Time, e.Type, e.Status) - case Volume: + if e.Name != "" { + humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name) + } else { + humanFormat = fmt.Sprintf("%s %s %s", e.Time, e.Type, e.Status) + } + case Volume, Machine: humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name) } return humanFormat @@ -97,19 +103,19 @@ func (e *Event) ToHumanReadable(truncate bool) string { // NewEventFromString takes stringified json and converts // it to an event func newEventFromJSONString(event string) (*Event, error) { - e := Event{} - if err := json.Unmarshal([]byte(event), &e); err != nil { + e := new(Event) + if err := json.Unmarshal([]byte(event), e); err != nil { return nil, err } - return &e, nil + return e, nil } -// ToString converts a Type to a string +// String converts a Type to a string func (t Type) String() string { return string(t) } -// ToString converts a status to a string +// String converts a status to a string func (s Status) String() string { return string(s) } @@ -121,6 +127,8 @@ func StringToType(name string) (Type, error) { return Container, nil case Image.String(): return Image, nil + case Machine.String(): + return Machine, nil case Network.String(): return Network, nil case Pod.String(): @@ -188,12 +196,16 @@ func 
StringToStatus(name string) (Status, error) { return Refresh, nil case Remove.String(): return Remove, nil + case Rename.String(): + return Rename, nil case Renumber.String(): return Renumber, nil case Restart.String(): return Restart, nil case Restore.String(): return Restore, nil + case Rotate.String(): + return Rotate, nil case Save.String(): return Save, nil case Start.String(): @@ -213,14 +225,3 @@ func StringToStatus(name string) (Status, error) { } return "", errors.Errorf("unknown event status %q", name) } - -func (e EventLogFile) getTail(options ReadOptions) (*tail.Tail, error) { - reopen := true - seek := tail.SeekInfo{Offset: 0, Whence: os.SEEK_END} - if options.FromStart || !options.Stream { - seek.Whence = 0 - reopen = false - } - stream := options.Stream - return tail.TailFile(e.options.LogFilePath, tail.Config{ReOpen: reopen, Follow: stream, Location: &seek, Logger: tail.DiscardingLogger, Poll: true}) -} diff --git a/vendor/github.com/containers/podman/v3/libpod/events/events_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/events/events_linux.go rename to vendor/github.com/containers/podman/v4/libpod/events/events_linux.go index 482d7d6dd52..4320f219018 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/events_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go @@ -21,6 +21,8 @@ func NewEventer(options EventerOptions) (Eventer, error) { return EventLogFile{options}, nil case strings.ToUpper(Null.String()): return NewNullEventer(), nil + case strings.ToUpper(Memory.String()): + return NewMemoryEventer(), nil default: return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType)) } diff --git a/vendor/github.com/containers/podman/v3/libpod/events/events_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/events/events_unsupported.go rename to vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go index 5b32a1b4b86..25c17552419 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/events_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package events diff --git a/vendor/github.com/containers/podman/v3/libpod/events/filters.go b/vendor/github.com/containers/podman/v4/libpod/events/filters.go similarity index 98% rename from vendor/github.com/containers/podman/v3/libpod/events/filters.go rename to vendor/github.com/containers/podman/v4/libpod/events/filters.go index d5e2b81f398..64c162db2f6 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/filters.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/filters.go @@ -4,7 +4,7 @@ import ( "strings" "time" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/pkg/util" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/events/journal_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go similarity index 99% rename from vendor/github.com/containers/podman/v3/libpod/events/journal_linux.go rename to vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go index 72e03355a22..866042a4c3d 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/journal_linux.go +++ 
b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go @@ -1,3 +1,4 @@ +//go:build systemd // +build systemd package events @@ -8,7 +9,7 @@ import ( "strconv" "time" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/pkg/util" "github.com/coreos/go-systemd/v22/journal" "github.com/coreos/go-systemd/v22/sdjournal" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/podman/v3/libpod/events/journal_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/events/journal_unsupported.go rename to vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go index 004efdab22c..6ed39792bca 100644 --- a/vendor/github.com/containers/podman/v3/libpod/events/journal_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go @@ -1,3 +1,4 @@ +//go:build !systemd // +build !systemd package events diff --git a/vendor/github.com/containers/podman/v4/libpod/events/logfile.go b/vendor/github.com/containers/podman/v4/libpod/events/logfile.go new file mode 100644 index 00000000000..21fdd802712 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/logfile.go @@ -0,0 +1,261 @@ +//go:build linux +// +build linux + +package events + +import ( + "bufio" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "time" + + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/storage/pkg/lockfile" + "github.com/nxadm/tail" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// EventLogFile is the structure for event writing to a logfile. It contains the eventer +// options and the event itself. Methods for reading and writing are also defined from it. 
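+// Writes are serialized through a lock file next to the log (LogFilePath +
+// ".lock"), so concurrent writers cannot interleave or lose events.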
+type EventLogFile struct { + options EventerOptions +} + +// Writes to the log file +func (e EventLogFile) Write(ee Event) error { + // We need to lock events file + lock, err := lockfile.GetLockfile(e.options.LogFilePath + ".lock") + if err != nil { + return err + } + lock.Lock() + defer lock.Unlock() + + eventJSONString, err := ee.ToJSONString() + if err != nil { + return err + } + + rotated, err := rotateLog(e.options.LogFilePath, eventJSONString, e.options.LogFileMaxSize) + if err != nil { + return fmt.Errorf("rotating log file: %w", err) + } + + if rotated { + rEvent := NewEvent(Rotate) + rEvent.Type = System + rEvent.Name = e.options.LogFilePath + rotateJSONString, err := rEvent.ToJSONString() + if err != nil { + return err + } + if err := e.writeString(rotateJSONString); err != nil { + return err + } + } + + return e.writeString(eventJSONString) +} + +func (e EventLogFile) writeString(s string) error { + f, err := os.OpenFile(e.options.LogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0700) + if err != nil { + return err + } + if _, err := f.WriteString(s + "\n"); err != nil { + return err + } + return nil +} + +func (e EventLogFile) getTail(options ReadOptions) (*tail.Tail, error) { + reopen := true + seek := tail.SeekInfo{Offset: 0, Whence: os.SEEK_END} + if options.FromStart || !options.Stream { + seek.Whence = 0 + reopen = false + } + stream := options.Stream + return tail.TailFile(e.options.LogFilePath, tail.Config{ReOpen: reopen, Follow: stream, Location: &seek, Logger: tail.DiscardingLogger, Poll: true}) +} + +// Reads from the log file +func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error { + defer close(options.EventChannel) + filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until) + if err != nil { + return errors.Wrapf(err, "failed to parse event filters") + } + t, err := e.getTail(options) + if err != nil { + return err + } + if len(options.Until) > 0 { + untilTime, err := util.ParseInputTime(options.Until, false) + if err != nil { + return err + } + go func() { + time.Sleep(time.Until(untilTime)) + if err := t.Stop(); err != nil { + logrus.Errorf("Stopping logger: %v", err) + } + }() + } + funcDone := make(chan bool) + copy := true + go func() { + select { + case <-funcDone: + // Do nothing + case <-ctx.Done(): + copy = false + t.Kill(errors.New("hangup by client")) + } + }() + for line := range t.Lines { + select { + case <-ctx.Done(): + // the consumer has cancelled + return nil + default: + // fallthrough + } + + event, err := newEventFromJSONString(line.Text) + if err != nil { + return err + } + switch event.Type { + case Image, Volume, Pod, System, Container, Network: + // no-op + default: + return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath) + } + if copy && applyFilters(event, filterMap) { + options.EventChannel <- event + } + } + funcDone <- true + return nil +} + +// String returns a string representation of the logger +func (e EventLogFile) String() string { + return LogFile.String() +} + +// Rotates the log file if the log file size and content exceeds limit +func rotateLog(logfile string, content string, limit uint64) (bool, error) { + if limit == 0 { + return false, nil + } + file, err := os.Stat(logfile) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // The logfile does not exist yet. 
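+ // Nothing to rotate; the first write creates the file (writeString opens it with O_CREATE).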
+ return false, nil + } + return false, err + } + var filesize = uint64(file.Size()) + var contentsize = uint64(len([]rune(content))) + if filesize+contentsize < limit { + return false, nil + } + + if err := truncate(logfile); err != nil { + return false, err + } + return true, nil +} + +// Truncates the log file and saves 50% of content to new log file +func truncate(filePath string) error { + orig, err := os.Open(filePath) + if err != nil { + return err + } + defer orig.Close() + + origFinfo, err := orig.Stat() + if err != nil { + return err + } + + size := origFinfo.Size() + threshold := size / 2 + + tmp, err := ioutil.TempFile(path.Dir(filePath), "") + if err != nil { + // Retry in /tmp in case creating a tmp file in the same + // directory has failed. + tmp, err = ioutil.TempFile("", "") + if err != nil { + return err + } + } + defer tmp.Close() + + // Jump directly to the threshold, drop the first line and copy the remainder + if _, err := orig.Seek(threshold, 0); err != nil { + return err + } + reader := bufio.NewReader(orig) + if _, err := reader.ReadString('\n'); err != nil { + if !errors.Is(err, io.EOF) { + return err + } + } + if _, err := reader.WriteTo(tmp); err != nil { + return fmt.Errorf("writing truncated contents: %w", err) + } + + if err := renameLog(tmp.Name(), filePath); err != nil { + return fmt.Errorf("writing back %s to %s: %w", tmp.Name(), filePath, err) + } + + return nil +} + +// Renames from, to +func renameLog(from, to string) error { + err := os.Rename(from, to) + if err == nil { + return nil + } + + if !errors.Is(err, unix.EXDEV) { + return err + } + + // Files are not on the same partition, so we need to copy them the + // hard way. + fFrom, err := os.Open(from) + if err != nil { + return err + } + defer fFrom.Close() + + fTo, err := os.Create(to) + if err != nil { + return err + } + defer fTo.Close() + + if _, err := io.Copy(fTo, fFrom); err != nil { + return fmt.Errorf("writing back from temporary file: %w", err) + } + + if err := os.Remove(from); err != nil { + return fmt.Errorf("removing temporary file: %w", err) + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/libpod/events/memory.go b/vendor/github.com/containers/podman/v4/libpod/events/memory.go new file mode 100644 index 00000000000..b3e03d86bca --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/events/memory.go @@ -0,0 +1,49 @@ +package events + +import ( + "context" +) + +// EventMemory is the structure for event writing to a channel. It contains the eventer +// options and the event itself. Methods for reading and writing are also defined from it. 
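+// Events are buffered in a channel of capacity 100 (see NewMemoryEventer);
+// Write blocks once the buffer is full, and Read drains at most one event
+// per call without blocking.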
+type EventMemory struct { + options EventerOptions + elements chan *Event +} + +// Write event to memory queue +func (e EventMemory) Write(event Event) (err error) { + e.elements <- &event + return +} + +// Read event(s) from memory queue +func (e EventMemory) Read(ctx context.Context, options ReadOptions) (err error) { + select { + case <-ctx.Done(): + return + default: + } + + select { + case event := <-e.elements: + options.EventChannel <- event + default: + } + return nil +} + +// String returns eventer type +func (e EventMemory) String() string { + return e.options.EventerType +} + +// NewMemoryEventer returns configured MemoryEventer +func NewMemoryEventer() Eventer { + return EventMemory{ + options: EventerOptions{ + EventerType: Memory.String(), + }, + elements: make(chan *Event, 100), + } +} diff --git a/vendor/github.com/containers/podman/v3/libpod/events/nullout.go b/vendor/github.com/containers/podman/v4/libpod/events/nullout.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/events/nullout.go rename to vendor/github.com/containers/podman/v4/libpod/events/nullout.go diff --git a/vendor/github.com/containers/podman/v3/libpod/healthcheck.go b/vendor/github.com/containers/podman/v4/libpod/healthcheck.go similarity index 94% rename from vendor/github.com/containers/podman/v3/libpod/healthcheck.go rename to vendor/github.com/containers/podman/v4/libpod/healthcheck.go index 91f031513dd..40af9aec3ca 100644 --- a/vendor/github.com/containers/podman/v3/libpod/healthcheck.go +++ b/vendor/github.com/containers/podman/v4/libpod/healthcheck.go @@ -2,14 +2,13 @@ package libpod import ( "bufio" - "bytes" "io/ioutil" "os" "path/filepath" "strings" "time" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -22,16 +21,6 @@ const ( MaxHealthCheckLogLength = 500 ) -// hcWriteCloser allows us to use bufio as a WriteCloser -type hcWriteCloser struct { - *bufio.Writer -} - -// Used to add a closer to bufio -func (hcwc hcWriteCloser) Close() error { - return nil -} - // HealthCheck verifies the state and validity of the healthcheck configuration // on the container and then executes the healthcheck func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) { @@ -51,7 +40,6 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) { var ( newCommand []string returnCode int - capture bytes.Buffer inStartPeriod bool ) hcCommand := c.HealthCheckConfig().Test @@ -73,20 +61,30 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) { if len(newCommand) < 1 || newCommand[0] == "" { return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID()) } - captureBuffer := bufio.NewWriter(&capture) - hcw := hcWriteCloser{ - captureBuffer, + rPipe, wPipe, err := os.Pipe() + if err != nil { + return define.HealthCheckInternalError, errors.Wrapf(err, "unable to create pipe for healthcheck session") } + defer wPipe.Close() + defer rPipe.Close() + streams := new(define.AttachStreams) - streams.OutputStream = hcw - streams.ErrorStream = hcw streams.InputStream = bufio.NewReader(os.Stdin) - + streams.OutputStream = wPipe + streams.ErrorStream = wPipe streams.AttachOutput = true streams.AttachError = true streams.AttachInput = true + stdout := []string{} + go func() { + scanner := bufio.NewScanner(rPipe) + for scanner.Scan() { + stdout = append(stdout, scanner.Text()) + } + }() + 
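+ // The goroutine above drains the read end of the pipe while the healthcheck
+ // command runs, so the exec session cannot stall on a full pipe buffer; the
+ // collected lines are joined into the healthcheck log below.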
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
 timeStart := time.Now()
 hcResult := define.HealthCheckSuccess
@@ -119,7 +117,7 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
 }
 }
 
- eventLog := capture.String()
+ eventLog := strings.Join(stdout, "\n")
 if len(eventLog) > MaxHealthCheckLogLength {
 eventLog = eventLog[:MaxHealthCheckLogLength]
 }
diff --git a/vendor/github.com/containers/podman/v3/libpod/healthcheck_linux.go b/vendor/github.com/containers/podman/v4/libpod/healthcheck_linux.go
similarity index 57%
rename from vendor/github.com/containers/podman/v3/libpod/healthcheck_linux.go
rename to vendor/github.com/containers/podman/v4/libpod/healthcheck_linux.go
index e08214809a0..45b3a0e41dc 100644
--- a/vendor/github.com/containers/podman/v3/libpod/healthcheck_linux.go
+++ b/vendor/github.com/containers/podman/v4/libpod/healthcheck_linux.go
@@ -1,13 +1,14 @@
 package libpod
 
 import (
+ "context"
 "fmt"
 "os"
 "os/exec"
 "strings"
 
- "github.com/containers/podman/v3/pkg/rootless"
- "github.com/containers/podman/v3/pkg/systemd"
+ "github.com/containers/podman/v4/pkg/rootless"
+ "github.com/containers/podman/v4/pkg/systemd"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 )
@@ -55,13 +56,13 @@ func (c *Container) startTimer() error {
 return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
 }
 defer conn.Close()
- _, err = conn.StartUnit(fmt.Sprintf("%s.service", c.ID()), "fail", nil)
+ _, err = conn.StartUnitContext(context.Background(), fmt.Sprintf("%s.service", c.ID()), "fail", nil)
 return err
 }
 
-// removeTimer removes the systemd timer and unit files
+// removeTransientFiles removes the systemd timer and unit files
 // for the container
-func (c *Container) removeTimer() error {
+func (c *Container) removeTransientFiles(ctx context.Context) error {
 if c.disableHealthCheckSystemd() {
 return nil
 }
@@ -71,12 +72,29 @@ func (c *Container) removeTimer() error {
 }
 defer conn.Close()
 timerFile := fmt.Sprintf("%s.timer", c.ID())
- _, err = conn.StopUnit(timerFile, "fail", nil)
+ serviceFile := fmt.Sprintf("%s.service", c.ID())
 
- // We want to ignore errors where the timer unit has already been removed. The error
- // return is generic so we have to check against the string in the error
- if err != nil && strings.HasSuffix(err.Error(), ".timer not loaded.") {
- return nil
+ // If the service has failed (the healthcheck has failed), then
+ // the .service file is not removed on stopping the unit file. If
+ // we check the properties of the service, it will automatically
+ // reset the state. But checking the state takes msecs vs usecs to
+ // blindly call reset.
+ if err := conn.ResetFailedUnitContext(ctx, serviceFile); err != nil {
+ logrus.Debugf("failed to reset unit file: %q", err)
+ }
+
+ // We want to ignore errors where the timer unit and/or service unit has already
+ // been removed. The error return is generic so we have to check against the
+ // string in the error
+ if _, err = conn.StopUnitContext(ctx, serviceFile, "fail", nil); err != nil {
+ if !strings.HasSuffix(err.Error(), ".service not loaded.") {
+ return errors.Wrapf(err, "unable to remove service file")
+ }
+ }
+ if _, err = conn.StopUnitContext(ctx, timerFile, "fail", nil); err != nil {
+ if strings.HasSuffix(err.Error(), ".timer not loaded.") {
+ return nil
+ }
 }
 return err
 }
diff --git a/vendor/github.com/containers/podman/v3/libpod/info.go b/vendor/github.com/containers/podman/v4/libpod/info.go
similarity index 79%
rename from vendor/github.com/containers/podman/v3/libpod/info.go
rename to vendor/github.com/containers/podman/v4/libpod/info.go
index a2fd1849182..321680a81ef 100644
--- a/vendor/github.com/containers/podman/v3/libpod/info.go
+++ b/vendor/github.com/containers/podman/v4/libpod/info.go
@@ -5,21 +5,23 @@ import (
 "bytes"
 "fmt"
 "io/ioutil"
+ "math"
 "os"
 "os/exec"
 "runtime"
 "strconv"
 "strings"
+ "syscall"
 "time"
 
 "github.com/containers/buildah"
 "github.com/containers/common/pkg/apparmor"
+ "github.com/containers/common/pkg/cgroups"
 "github.com/containers/common/pkg/seccomp"
 "github.com/containers/image/v5/pkg/sysregistriesv2"
- "github.com/containers/podman/v3/libpod/define"
- "github.com/containers/podman/v3/libpod/linkmode"
- "github.com/containers/podman/v3/pkg/cgroups"
- "github.com/containers/podman/v3/pkg/rootless"
+ "github.com/containers/podman/v4/libpod/define"
+ "github.com/containers/podman/v4/libpod/linkmode"
+ "github.com/containers/podman/v4/pkg/rootless"
 "github.com/containers/storage"
 "github.com/containers/storage/pkg/system"
 "github.com/opencontainers/selinux/go-selinux"
@@ -104,7 +106,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
 return nil, errors.Wrapf(err, "error getting Seccomp profile path")
 }
 
- // CGroups version
+ // Cgroups version
 unified, err := cgroups.IsCgroup2UnifiedMode()
 if err != nil {
 return nil, errors.Wrapf(err, "error reading cgroups mode")
@@ -115,7 +117,10 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
 if err != nil {
 return nil, errors.Wrapf(err, "error getting available cgroup controllers")
 }
- 
+ cpuUtil, err := getCPUUtilization()
+ if err != nil {
+ return nil, err
+ }
 info := define.HostInfo{
 Arch: runtime.GOARCH,
 BuildahVersion: buildah.Version,
@@ -123,6 +128,7 @@
 CgroupControllers: availableControllers,
 Linkmode: linkmode.Linkmode(),
 CPUs: runtime.NumCPU(),
+ CPUUtilization: cpuUtil,
 Distribution: hostDistributionInfo,
 LogDriver: r.config.Containers.LogDriver,
 EventLogger: r.eventer.String(),
@@ -131,6 +137,7 @@
 Kernel: kv,
 MemFree: mi.MemFree,
 MemTotal: mi.MemTotal,
+ NetworkBackend: r.config.Network.NetworkBackend,
 OS: runtime.GOOS,
 Security: define.SecurityInfo{
 AppArmorEnabled: apparmor.IsEnabled(),
@@ -149,7 +156,7 @@
 if unified {
 cgroupVersion = "v2"
 }
- info.CGroupsVersion = cgroupVersion
+ info.CgroupsVersion = cgroupVersion
 
 slirp4netnsPath := r.config.Engine.NetworkCmdPath
 if slirp4netnsPath == "" {
@@ -284,17 +291,25 @@
 }
 imageInfo := define.ImageStore{Number: len(images)}
 
+ var grStats syscall.Statfs_t
+ if err := syscall.Statfs(r.store.GraphRoot(), &grStats); err != nil {
+ return nil, errors.Wrapf(err, "unable to collect graph root usage for %q", r.store.GraphRoot())
+ }
+ 
 allocated := uint64(grStats.Bsize) * grStats.Blocks
 info := define.StoreInfo{
- ImageStore: imageInfo,
- ImageCopyTmpDir: os.Getenv("TMPDIR"),
- ContainerStore: conInfo,
- GraphRoot: r.store.GraphRoot(),
- RunRoot: r.store.RunRoot(),
- GraphDriverName: r.store.GraphDriverName(),
- GraphOptions: nil,
- VolumePath: r.config.Engine.VolumePath,
- ConfigFile: configFile,
+ ImageStore: imageInfo,
+ ImageCopyTmpDir: os.Getenv("TMPDIR"),
+ ContainerStore: conInfo,
+ GraphRoot: r.store.GraphRoot(),
+ GraphRootAllocated: allocated,
+ GraphRootUsed: allocated - (uint64(grStats.Bsize) * grStats.Bfree),
+ RunRoot: r.store.RunRoot(),
+ GraphDriverName: r.store.GraphDriverName(),
+ GraphOptions: nil,
+ VolumePath: r.config.Engine.VolumePath,
+ ConfigFile: configFile,
 }
+ 
 graphOptions := map[string]interface{}{}
 for _, o := range r.store.GraphOptions() {
 split := strings.SplitN(o, "=", 2)
@@ -332,7 +347,7 @@ func readKernelVersion() (string, error) {
 return "", err
 }
 f := bytes.Fields(buf)
- if len(f) < 2 {
+ if len(f) < 3 {
 return string(bytes.TrimSpace(buf)), nil
 }
 return string(f[2]), nil
@@ -381,3 +396,44 @@ func (r *Runtime) GetHostDistributionInfo() define.DistributionInfo {
 }
 return dist
 }
+
+// getCPUUtilization Returns a CPUUsage object that summarizes CPU
+// usage for userspace, system, and idle time.
+func getCPUUtilization() (*define.CPUUsage, error) {
+ f, err := os.Open("/proc/stat")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+ // Read first line of /proc/stat
+ for scanner.Scan() {
+ break
+ }
+ // column 1 is user, column 3 is system, column 4 is idle
+ stats := strings.Split(scanner.Text(), " ")
+ return statToPercent(stats)
+}
+
+func statToPercent(stats []string) (*define.CPUUsage, error) {
+ // There is always an extra space between cpu and the first metric
+ userTotal, err := strconv.ParseFloat(stats[2], 64)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to parse user value %q", stats[1])
+ }
+ systemTotal, err := strconv.ParseFloat(stats[4], 64)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to parse system value %q", stats[3])
+ }
+ idleTotal, err := strconv.ParseFloat(stats[5], 64)
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to parse idle value %q", stats[4])
+ }
+ total := userTotal + systemTotal + idleTotal
+ s := define.CPUUsage{
+ UserPercent: math.Round((userTotal/total*100)*100) / 100,
+ SystemPercent: math.Round((systemTotal/total*100)*100) / 100,
+ IdlePercent: math.Round((idleTotal/total*100)*100) / 100,
+ }
+ return &s, nil
+}
diff --git a/vendor/github.com/containers/podman/v3/libpod/kube.go b/vendor/github.com/containers/podman/v4/libpod/kube.go
similarity index 68%
rename from vendor/github.com/containers/podman/v3/libpod/kube.go
rename to vendor/github.com/containers/podman/v4/libpod/kube.go
index bf86a9d1696..5a5fe9d3589 100644
--- a/vendor/github.com/containers/podman/v3/libpod/kube.go
+++ b/vendor/github.com/containers/podman/v4/libpod/kube.go
@@ -10,21 +10,24 @@ import (
 "strconv"
 "strings"
 "time"
- 
- "github.com/containers/podman/v3/libpod/define"
- "github.com/containers/podman/v3/libpod/network/types"
- "github.com/containers/podman/v3/pkg/env"
- "github.com/containers/podman/v3/pkg/lookup"
- "github.com/containers/podman/v3/pkg/namespaces"
- "github.com/containers/podman/v3/pkg/specgen"
- "github.com/containers/podman/v3/pkg/util"
+ "unicode/utf8"
+
+ "github.com/containers/common/libnetwork/types"
+ "github.com/containers/common/pkg/config"
+ 
"github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/env" + v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" + v12 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr" + "github.com/containers/podman/v4/pkg/lookup" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - v12 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // GenerateForKube takes a slice of libpod containers and generates @@ -73,11 +76,15 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e Hostnames: []string{hostSli[0]}, }) } - ports, err = ocicniPortMappingToContainerPort(infraContainer.config.PortMappings) + ports, err = portMappingToContainerPort(infraContainer.config.PortMappings) + if err != nil { + return nil, servicePorts, err + } + spState := newServicePortState() + servicePorts, err = spState.containerPortsToServicePorts(ports) if err != nil { return nil, servicePorts, err } - servicePorts = containerPortsToServicePorts(ports) hostNetwork = infraContainer.NetworkMode() == string(namespaces.NetworkMode(specgen.Host)) } pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork) @@ -165,14 +172,92 @@ func (v *Volume) GenerateForKube() *v1.PersistentVolumeClaim { } } +// YAMLPodSpec represents the same k8s API core PodSpec struct with a small +// change and that is having Containers as a pointer to YAMLContainer. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. Fixes: GH-11998 +type YAMLPodSpec struct { + v1.PodSpec + Containers []*YAMLContainer `json:"containers"` +} + +// YAMLPod represents the same k8s API core Pod struct with a small +// change and that is having Spec as a pointer to YAMLPodSpec and +// Status as a pointer to k8s API core PodStatus. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. Fixes: GH-11998 +type YAMLPod struct { + v1.Pod + Spec *YAMLPodSpec `json:"spec,omitempty"` + Status *v1.PodStatus `json:"status,omitempty"` +} + +// YAMLService represents the same k8s API core Service struct with a small +// change and that is having Status as a pointer to k8s API core ServiceStatus. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. Fixes: GH-11998 +type YAMLService struct { + v1.Service + Status *v1.ServiceStatus `json:"status,omitempty"` +} + +// YAMLContainer represents the same k8s API core Container struct with a small +// change and that is having Resources as a pointer to k8s API core ResourceRequirements. +// Because Go doesn't omit empty struct and we want to omit Status in YAML +// if it's empty. 
Fixes: GH-11998 +type YAMLContainer struct { + v1.Container + Resources *v1.ResourceRequirements `json:"resources,omitempty"` +} + +// ConvertV1PodToYAMLPod takes k8s API core Pod and returns a pointer to YAMLPod +func ConvertV1PodToYAMLPod(pod *v1.Pod) *YAMLPod { + cs := []*YAMLContainer{} + for _, cc := range pod.Spec.Containers { + var res *v1.ResourceRequirements + if len(cc.Resources.Limits) > 0 || len(cc.Resources.Requests) > 0 { + res = &cc.Resources + } + cs = append(cs, &YAMLContainer{Container: cc, Resources: res}) + } + mpo := &YAMLPod{Pod: *pod} + mpo.Spec = &YAMLPodSpec{PodSpec: pod.Spec, Containers: cs} + for _, ctr := range pod.Spec.Containers { + if ctr.SecurityContext == nil || ctr.SecurityContext.SELinuxOptions == nil { + continue + } + selinuxOpts := ctr.SecurityContext.SELinuxOptions + if selinuxOpts.User == "" && selinuxOpts.Role == "" && selinuxOpts.Type == "" && selinuxOpts.Level == "" { + ctr.SecurityContext.SELinuxOptions = nil + } + } + dnsCfg := pod.Spec.DNSConfig + if dnsCfg != nil && (len(dnsCfg.Nameservers)+len(dnsCfg.Searches)+len(dnsCfg.Options) > 0) { + mpo.Spec.DNSConfig = dnsCfg + } + status := pod.Status + if status.Phase != "" || len(status.Conditions) > 0 || + status.Message != "" || status.Reason != "" || + status.NominatedNodeName != "" || status.HostIP != "" || + status.PodIP != "" || status.StartTime != nil || + len(status.InitContainerStatuses) > 0 || len(status.ContainerStatuses) > 0 || status.QOSClass != "" || len(status.EphemeralContainerStatuses) > 0 { + mpo.Status = &status + } + return mpo +} + // GenerateKubeServiceFromV1Pod creates a v1 service object from a v1 pod object -func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1.Service { - service := v1.Service{} +func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) (YAMLService, error) { + service := YAMLService{} selector := make(map[string]string) selector["app"] = pod.Labels["app"] ports := servicePorts if len(ports) == 0 { - ports = containersToServicePorts(pod.Spec.Containers) + p, err := containersToServicePorts(pod.Spec.Containers) + if err != nil { + return service, err + } + ports = p } serviceSpec := v1.ServiceSpec{ Ports: ports, @@ -186,38 +271,78 @@ func GenerateKubeServiceFromV1Pod(pod *v1.Pod, servicePorts []v1.ServicePort) v1 APIVersion: pod.TypeMeta.APIVersion, } service.TypeMeta = tm - return service + return service, nil +} + +// servicePortState allows calling containerPortsToServicePorts for a single service +type servicePortState struct { + // A program using the shared math/rand state with the default seed will produce the same sequence of pseudo-random numbers + // for each execution. Use a private RNG state not to interfere with other users. 
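+ // usedPorts records the NodePorts already handed out so that a single
+ // service never assigns a duplicate (see containerPortsToServicePorts).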
+ rng *rand.Rand + usedPorts map[int]struct{} +} + +func newServicePortState() servicePortState { + return servicePortState{ + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + usedPorts: map[int]struct{}{}, + } +} + +func TruncateKubeAnnotation(str string) string { + str = strings.TrimSpace(str) + if utf8.RuneCountInString(str) < define.MaxKubeAnnotation { + return str + } + trunc := string([]rune(str)[:define.MaxKubeAnnotation]) + logrus.Warnf("Truncation Annotation: %q to %q: Kubernetes only allows %d characters", str, trunc, define.MaxKubeAnnotation) + return trunc } // containerPortsToServicePorts takes a slice of containerports and generates a // slice of service ports -func containerPortsToServicePorts(containerPorts []v1.ContainerPort) []v1.ServicePort { +func (state *servicePortState) containerPortsToServicePorts(containerPorts []v1.ContainerPort) ([]v1.ServicePort, error) { sps := make([]v1.ServicePort, 0, len(containerPorts)) for _, cp := range containerPorts { - nodePort := 30000 + rand.Intn(32767-30000+1) + var nodePort int + attempt := 0 + for { + // Legal nodeport range is 30000-32767 + nodePort = 30000 + state.rng.Intn(32767-30000+1) + if _, found := state.usedPorts[nodePort]; !found { + state.usedPorts[nodePort] = struct{}{} + break + } + attempt++ + if attempt >= 100 { + return nil, fmt.Errorf("too many attempts trying to generate a unique NodePort number") + } + } servicePort := v1.ServicePort{ - Protocol: cp.Protocol, - Port: cp.ContainerPort, - NodePort: int32(nodePort), - Name: strconv.Itoa(int(cp.ContainerPort)), + Protocol: cp.Protocol, + Port: cp.ContainerPort, + NodePort: int32(nodePort), + Name: strconv.Itoa(int(cp.ContainerPort)), + TargetPort: intstr.Parse(strconv.Itoa(int(cp.ContainerPort))), } sps = append(sps, servicePort) } - return sps + return sps, nil } // containersToServicePorts takes a slice of v1.Containers and generates an // inclusive list of serviceports to expose -func containersToServicePorts(containers []v1.Container) []v1.ServicePort { - // Without the call to rand.Seed, a program will produce the same sequence of pseudo-random numbers - // for each execution. Legal nodeport range is 30000-32767 - rand.Seed(time.Now().UnixNano()) - +func containersToServicePorts(containers []v1.Container) ([]v1.ServicePort, error) { + state := newServicePortState() sps := make([]v1.ServicePort, 0, len(containers)) for _, ctr := range containers { - sps = append(sps, containerPortsToServicePorts(ctr.Ports)...) + ports, err := state.containerPortsToServicePorts(ctr.Ports) + if err != nil { + return nil, err + } + sps = append(sps, ports...) 
} - return sps + return sps, nil } func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) { @@ -234,11 +359,13 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po for _, ctr := range containers { if !ctr.IsInfra() { + for k, v := range ctr.config.Spec.Annotations { + podAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = TruncateKubeAnnotation(v) + } // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(removeUnderscores(ctr.Name()), ctr.Labels()) { - podAnnotations[k] = v + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { + podAnnotations[k] = TruncateKubeAnnotation(v) } - isInit := ctr.IsInitCtr() ctr, volumes, _, annotations, err := containerToV1Container(ctx, ctr) @@ -246,7 +373,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po return nil, err } for k, v := range annotations { - podAnnotations[define.BindMountPrefix+k] = v + podAnnotations[define.BindMountPrefix+k] = TruncateKubeAnnotation(v) } // Since port bindings for the pod are handled by the // infra container, wipe them here. @@ -330,7 +457,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta InitContainers: initCtrs, Volumes: volumes, } - if dnsOptions != nil { + if dnsOptions != nil && (len(dnsOptions.Nameservers)+len(dnsOptions.Searches)+len(dnsOptions.Options) > 0) { ps.DNSConfig = dnsOptions } p := v1.Pod{ @@ -350,10 +477,16 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod, hostNetwork := true podDNS := v1.PodDNSConfig{} kubeAnnotations := make(map[string]string) + ctrNames := make([]string, 0, len(ctrs)) for _, ctr := range ctrs { + ctrNames = append(ctrNames, removeUnderscores(ctr.Name())) + for k, v := range ctr.config.Spec.Annotations { + kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = TruncateKubeAnnotation(v) + } + // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(removeUnderscores(ctr.Name()), ctr.Labels()) { - kubeAnnotations[k] = v + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { + kubeAnnotations[k] = TruncateKubeAnnotation(v) } isInit := ctr.IsInitCtr() @@ -366,7 +499,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod, return nil, err } for k, v := range annotations { - kubeAnnotations[define.BindMountPrefix+k] = v + kubeAnnotations[define.BindMountPrefix+k] = TruncateKubeAnnotation(v) } if isInit { kubeInitCtrs = append(kubeInitCtrs, kubeCtr) @@ -407,8 +540,15 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod, } } // end if ctrDNS } + podName := removeUnderscores(ctrs[0].Name()) + // Check if the pod name and container name will end up conflicting + // Append -pod if so + if util.StringInSlice(podName, ctrNames) { + podName += "-pod" + } + return newPodObject( - strings.ReplaceAll(ctrs[0].Name(), "_", ""), + podName, kubeAnnotations, kubeInitCtrs, kubeCtrs, @@ -445,16 +585,11 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, [] kubeVolumes = append(kubeVolumes, volumes...) 
} - envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env) - if err != nil { - return kubeContainer, kubeVolumes, nil, annotations, err - } - portmappings, err := c.PortMappings() if err != nil { return kubeContainer, kubeVolumes, nil, annotations, err } - ports, err := ocicniPortMappingToContainerPort(portmappings) + ports, err := portMappingToContainerPort(portmappings) if err != nil { return kubeContainer, kubeVolumes, nil, annotations, err } @@ -471,25 +606,51 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, [] kubeContainer.Name = removeUnderscores(c.Name()) _, image := c.Image() + + // The infra container may have been created with an overlay root FS + // instead of an infra image. If so, set the image to the default K8s + // pause one and make sure it's in the storage by pulling it down if + // missing. + if image == "" && c.IsInfra() { + image = c.runtime.config.Engine.InfraImage + if _, err := c.runtime.libimageRuntime.Pull(ctx, image, config.PullPolicyMissing, nil); err != nil { + return kubeContainer, nil, nil, nil, err + } + } + kubeContainer.Image = image kubeContainer.Stdin = c.Stdin() img, _, err := c.runtime.libimageRuntime.LookupImage(image, nil) if err != nil { - return kubeContainer, kubeVolumes, nil, annotations, err + return kubeContainer, kubeVolumes, nil, annotations, fmt.Errorf("looking up image %q of container %q: %w", image, c.ID(), err) } - imgData, err := img.Inspect(ctx, false) + imgData, err := img.Inspect(ctx, nil) if err != nil { return kubeContainer, kubeVolumes, nil, annotations, err } - if reflect.DeepEqual(imgData.Config.Cmd, kubeContainer.Command) { + // If the user doesn't set a command/entrypoint when creating the container with podman and + // is using the image command or entrypoint from the image, don't add it to the generated kube yaml + if reflect.DeepEqual(imgData.Config.Cmd, kubeContainer.Command) || reflect.DeepEqual(imgData.Config.Entrypoint, kubeContainer.Command) { kubeContainer.Command = nil } - kubeContainer.WorkingDir = c.WorkingDir() + if c.WorkingDir() != "/" && imgData.Config.WorkingDir != c.WorkingDir() { + kubeContainer.WorkingDir = c.WorkingDir() + } + + if imgData.User == c.User() { + kubeSec.RunAsGroup, kubeSec.RunAsUser = nil, nil + } + + envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env, imgData.Config.Env) + if err != nil { + return kubeContainer, kubeVolumes, nil, annotations, err + } + kubeContainer.Env = envVariables + kubeContainer.Ports = ports // This should not be applicable - //container.EnvFromSource = - kubeContainer.Env = envVariables + // container.EnvFromSource = kubeContainer.SecurityContext = kubeSec kubeContainer.StdinOnce = false kubeContainer.TTY = c.config.Spec.Process.Terminal @@ -564,36 +725,49 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, [] return kubeContainer, kubeVolumes, &dns, annotations, nil } -// ocicniPortMappingToContainerPort takes an ocicni portmapping and converts +// portMappingToContainerPort takes a port mapping and converts // it to a v1.ContainerPort format for kube output -func ocicniPortMappingToContainerPort(portMappings []types.OCICNIPortMapping) ([]v1.ContainerPort, error) { +func portMappingToContainerPort(portMappings []types.PortMapping) ([]v1.ContainerPort, error) { containerPorts := make([]v1.ContainerPort, 0, len(portMappings)) for _, p := range portMappings { - var protocol v1.Protocol - switch strings.ToUpper(p.Protocol) { - case "TCP": - protocol = v1.ProtocolTCP -
case "UDP": - protocol = v1.ProtocolUDP - default: - return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol) - } - cp := v1.ContainerPort{ - // Name will not be supported - HostPort: p.HostPort, - HostIP: p.HostIP, - ContainerPort: p.ContainerPort, - Protocol: protocol, - } - containerPorts = append(containerPorts, cp) + protocols := strings.Split(p.Protocol, ",") + for _, proto := range protocols { + var protocol v1.Protocol + switch strings.ToUpper(proto) { + case "TCP": + // do nothing as it is the default protocol in k8s, there is no need to explicitly + // add it to the generated yaml + case "UDP": + protocol = v1.ProtocolUDP + case "SCTP": + protocol = v1.ProtocolSCTP + default: + return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol) + } + for i := uint16(0); i < p.Range; i++ { + cp := v1.ContainerPort{ + // Name will not be supported + HostPort: int32(p.HostPort + i), + HostIP: p.HostIP, + ContainerPort: int32(p.ContainerPort + i), + Protocol: protocol, + } + containerPorts = append(containerPorts, cp) + } + } } return containerPorts, nil } // libpodEnvVarsToKubeEnvVars converts a key=value string slice to []v1.EnvVar -func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) { +func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar, error) { defaultEnv := env.DefaultEnvVariables() envVars := make([]v1.EnvVar, 0, len(envs)) + imageMap := make(map[string]string, len(imageEnvs)) + for _, ie := range imageEnvs { + split := strings.SplitN(ie, "=", 2) + imageMap[split[0]] = split[1] + } for _, e := range envs { split := strings.SplitN(e, "=", 2) if len(split) != 2 { @@ -602,6 +776,9 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) { if defaultEnv[split[0]] == split[1] { continue } + if imageMap[split[0]] == split[1] { + continue + } ev := v1.EnvVar{ Name: split[0], Value: split[1], @@ -613,7 +790,7 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) { // libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, map[string]string, error) { - namedVolumes, mounts := c.sortUserVolumes(c.config.Spec) + namedVolumes, mounts := c.SortUserVolumes(c.config.Spec) vms := make([]v1.VolumeMount, 0, len(mounts)) vos := make([]v1.Volume, 0, len(mounts)) annotations := make(map[string]string) @@ -725,7 +902,7 @@ func convertVolumePathToName(hostSourcePath string) (string, error) { } // First, trim trailing slashes, then replace slashes with dashes. // Thus, /mnt/data/ will become mnt-data - return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil + return strings.ReplaceAll(strings.Trim(hostSourcePath, "/"), "/", "-"), nil } func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities { @@ -767,14 +944,20 @@ func capAddDrop(caps *specs.LinuxCapabilities) (*v1.Capabilities, error) { if err != nil { return nil, err } + + defCaps := g.Config.Process.Capabilities // Combine all the default capabilities into a slice - defaultCaps := append(g.Config.Process.Capabilities.Ambient, g.Config.Process.Capabilities.Bounding...) - defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Effective...) - defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Inheritable...) - defaultCaps = append(defaultCaps, g.Config.Process.Capabilities.Permitted...) 
+ defaultCaps := make([]string, 0, len(defCaps.Ambient)+len(defCaps.Bounding)+len(defCaps.Effective)+len(defCaps.Inheritable)+len(defCaps.Permitted)) + defaultCaps = append(defaultCaps, defCaps.Ambient...) + defaultCaps = append(defaultCaps, defCaps.Bounding...) + defaultCaps = append(defaultCaps, defCaps.Effective...) + defaultCaps = append(defaultCaps, defCaps.Inheritable...) + defaultCaps = append(defaultCaps, defCaps.Permitted...) // Combine all the container's capabilities into a slice - containerCaps := append(caps.Ambient, caps.Bounding...) + containerCaps := make([]string, 0, len(caps.Ambient)+len(caps.Bounding)+len(caps.Effective)+len(caps.Inheritable)+len(caps.Permitted)) + containerCaps = append(containerCaps, caps.Ambient...) + containerCaps = append(containerCaps, caps.Bounding...) containerCaps = append(containerCaps, caps.Effective...) containerCaps = append(containerCaps, caps.Inheritable...) containerCaps = append(containerCaps, caps.Permitted...) @@ -799,33 +982,42 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { capabilities = newCaps } + sc := v1.SecurityContext{ + // RunAsNonRoot is an optional parameter; our first implementations should be root only; however + // I'm leaving this as a bread-crumb for later + //RunAsNonRoot: &nonRoot, + } + if capabilities != nil { + sc.Capabilities = capabilities + } var selinuxOpts v1.SELinuxOptions opts := strings.SplitN(c.config.Spec.Annotations[define.InspectAnnotationLabel], ":", 2) - if len(opts) == 2 { + switch len(opts) { + case 2: switch opts[0] { case "type": selinuxOpts.Type = opts[1] + sc.SELinuxOptions = &selinuxOpts case "level": selinuxOpts.Level = opts[1] + sc.SELinuxOptions = &selinuxOpts } - } - if len(opts) == 1 { + case 1: if opts[0] == "disable" { selinuxOpts.Type = "spc_t" + sc.SELinuxOptions = &selinuxOpts } } - sc := v1.SecurityContext{ - Capabilities: capabilities, - Privileged: &privileged, - SELinuxOptions: &selinuxOpts, - // RunAsNonRoot is an optional parameter; our first implementations should be root only; however - // I'm leaving this as a bread-crumb for later - //RunAsNonRoot: &nonRoot, - ReadOnlyRootFilesystem: &ro, - AllowPrivilegeEscalation: &allowPrivEscalation, + if !allowPrivEscalation { + sc.AllowPrivilegeEscalation = &allowPrivEscalation + } + if privileged { + sc.Privileged = &privileged + } + if ro { + sc.ReadOnlyRootFilesystem = &ro } - if c.User() != "" { if !c.batched { c.lock.Lock() @@ -842,7 +1034,11 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) { if err != nil { return nil, errors.Wrapf(err, "failed to mount %s mountpoint", c.ID()) } - defer c.unmount(false) + defer func() { + if err := c.unmount(false); err != nil { + logrus.Errorf("Failed to unmount container: %v", err) + } + }() } logrus.Debugf("Looking in container for user: %s", c.User()) @@ -873,7 +1069,7 @@ func generateKubeVolumeDeviceFromLinuxDevice(devices []specs.LinuxDevice) []v1.V } func removeUnderscores(s string) string { - return strings.Replace(s, "_", "", -1) + return strings.ReplaceAll(s, "_", "") } // getAutoUpdateAnnotations searches for auto-update container labels @@ -882,12 +1078,13 @@ func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string) map[s autoUpdateLabel := "io.containers.autoupdate" annotations := make(map[string]string) + ctrName = removeUnderscores(ctrName) for k, v := range ctrLabels { if strings.Contains(k, autoUpdateLabel) { // since labels can vary between containers within a pod, they will be // identified with
the container name when converted into kube annotations kc := fmt.Sprintf("%s/%s", k, ctrName) - annotations[kc] = v + annotations[kc] = TruncateKubeAnnotation(v) } } diff --git a/vendor/github.com/containers/podman/v3/libpod/layers/layer.go b/vendor/github.com/containers/podman/v4/libpod/layers/layer.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/layers/layer.go rename to vendor/github.com/containers/podman/v4/libpod/layers/layer.go diff --git a/vendor/github.com/containers/podman/v3/libpod/linkmode/linkmode_dynamic.go b/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_dynamic.go similarity index 88% rename from vendor/github.com/containers/podman/v3/libpod/linkmode/linkmode_dynamic.go rename to vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_dynamic.go index 6d51d60e060..f020fa53e03 100644 --- a/vendor/github.com/containers/podman/v3/libpod/linkmode/linkmode_dynamic.go +++ b/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_dynamic.go @@ -1,3 +1,4 @@ +//go:build !static // +build !static package linkmode diff --git a/vendor/github.com/containers/podman/v3/libpod/linkmode/linkmode_static.go b/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_static.go similarity index 89% rename from vendor/github.com/containers/podman/v3/libpod/linkmode/linkmode_static.go rename to vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_static.go index 2db083f4a48..b181ad285f9 100644 --- a/vendor/github.com/containers/podman/v3/libpod/linkmode/linkmode_static.go +++ b/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_static.go @@ -1,3 +1,4 @@ +//go:build static // +build static package linkmode diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/file/file_lock.go b/vendor/github.com/containers/podman/v4/libpod/lock/file/file_lock.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/lock/file/file_lock.go rename to vendor/github.com/containers/podman/v4/libpod/lock/file/file_lock.go diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/file_lock_manager.go b/vendor/github.com/containers/podman/v4/libpod/lock/file_lock_manager.go similarity index 97% rename from vendor/github.com/containers/podman/v3/libpod/lock/file_lock_manager.go rename to vendor/github.com/containers/podman/v4/libpod/lock/file_lock_manager.go index 155606642f4..d89b0cc98aa 100644 --- a/vendor/github.com/containers/podman/v3/libpod/lock/file_lock_manager.go +++ b/vendor/github.com/containers/podman/v4/libpod/lock/file_lock_manager.go @@ -1,7 +1,7 @@ package lock import ( - "github.com/containers/podman/v3/libpod/lock/file" + "github.com/containers/podman/v4/libpod/lock/file" ) // FileLockManager manages shared memory locks. diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/in_memory_locks.go b/vendor/github.com/containers/podman/v4/libpod/lock/in_memory_locks.go similarity index 99% rename from vendor/github.com/containers/podman/v3/libpod/lock/in_memory_locks.go rename to vendor/github.com/containers/podman/v4/libpod/lock/in_memory_locks.go index f3c842f89ee..f7f47760c62 100644 --- a/vendor/github.com/containers/podman/v3/libpod/lock/in_memory_locks.go +++ b/vendor/github.com/containers/podman/v4/libpod/lock/in_memory_locks.go @@ -49,7 +49,7 @@ type InMemoryManager struct { // of locks. 
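Stepping back to the annotation handling in libpod/kube.go above: getAutoUpdateAnnotations namespaces each matching label key with the (underscore-stripped) container name and truncates the value. A standalone sketch; maxKubeAnnotation is an illustrative stand-in for podman's define.MaxKubeAnnotation:

	// annotations_sketch.go
	package main

	import (
		"fmt"
		"strings"
		"unicode/utf8"
	)

	const maxKubeAnnotation = 63 // illustrative; use define.MaxKubeAnnotation in podman

	func truncateKubeAnnotation(s string) string {
		s = strings.TrimSpace(s)
		if utf8.RuneCountInString(s) <= maxKubeAnnotation {
			return s
		}
		return string([]rune(s)[:maxKubeAnnotation])
	}

	func autoUpdateAnnotations(ctrName string, labels map[string]string) map[string]string {
		out := make(map[string]string)
		ctrName = strings.ReplaceAll(ctrName, "_", "")
		for k, v := range labels {
			if strings.Contains(k, "io.containers.autoupdate") {
				out[fmt.Sprintf("%s/%s", k, ctrName)] = truncateKubeAnnotation(v)
			}
		}
		return out
	}

	func main() {
		fmt.Println(autoUpdateAnnotations("web_1", map[string]string{
			"io.containers.autoupdate": "registry",
		})) // map[io.containers.autoupdate/web1:registry]
	}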
func NewInMemoryManager(numLocks uint32) (Manager, error) { if numLocks == 0 { - return nil, errors.Errorf("must provide a non-zero number of locks!") + return nil, errors.Errorf("must provide a non-zero number of locks") } manager := new(InMemoryManager) diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/lock.go b/vendor/github.com/containers/podman/v4/libpod/lock/lock.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/lock/lock.go rename to vendor/github.com/containers/podman/v4/libpod/lock/lock.go diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.c b/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.c similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.c rename to vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.c diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.go b/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.go similarity index 92% rename from vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.go rename to vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.go index 322e92a8f6e..c7f4d1bc59c 100644 --- a/vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.go +++ b/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package shm @@ -130,8 +131,17 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) { // semaphore indexes, and can still return error codes. retCode := C.allocate_semaphore(locks.lockStruct) if retCode < 0 { + var err = syscall.Errno(-1 * retCode) // Negative errno returned - return 0, syscall.Errno(-1 * retCode) + if errors.Is(err, syscall.ENOSPC) { + // ENOSPC expands to "no space left on device". While it is technically true + // that there's no room in the SHM inn for this lock, this tends to send normal people + // down the path of checking disk-space which is not actually their problem. + // Give a clue that it's actually due to num_locks filling up. 
+ var errFull = errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks) + return uint32(retCode), errFull + } + return uint32(retCode), syscall.Errno(-1 * retCode) } return uint32(retCode), nil diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.h b/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.h similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock.h rename to vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.h diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock_nocgo.go b/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock_nocgo.go similarity index 99% rename from vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock_nocgo.go rename to vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock_nocgo.go index 627344d9c72..31fc022235f 100644 --- a/vendor/github.com/containers/podman/v3/libpod/lock/shm/shm_lock_nocgo.go +++ b/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock_nocgo.go @@ -1,3 +1,4 @@ +//go:build linux && !cgo // +build linux,!cgo package shm diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/shm_lock_manager_linux.go b/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_linux.go similarity index 97% rename from vendor/github.com/containers/podman/v3/libpod/lock/shm_lock_manager_linux.go rename to vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_linux.go index ecccb2bcbe4..3076cd864e7 100644 --- a/vendor/github.com/containers/podman/v3/libpod/lock/shm_lock_manager_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package lock @@ -5,7 +6,7 @@ package lock import ( "syscall" - "github.com/containers/podman/v3/libpod/lock/shm" + "github.com/containers/podman/v4/libpod/lock/shm" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/lock/shm_lock_manager_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_unsupported.go similarity index 98% rename from vendor/github.com/containers/podman/v3/libpod/lock/shm_lock_manager_unsupported.go rename to vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_unsupported.go index 1d6e3fcbdd3..d578359aba1 100644 --- a/vendor/github.com/containers/podman/v3/libpod/lock/shm_lock_manager_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package lock diff --git a/vendor/github.com/containers/podman/v3/libpod/logs/log.go b/vendor/github.com/containers/podman/v4/libpod/logs/log.go similarity index 88% rename from vendor/github.com/containers/podman/v3/libpod/logs/log.go rename to vendor/github.com/containers/podman/v4/libpod/logs/log.go index 19a121fe9a4..4d7d5ac5874 100644 --- a/vendor/github.com/containers/podman/v3/libpod/logs/log.go +++ b/vendor/github.com/containers/podman/v4/libpod/logs/log.go @@ -8,8 +8,8 @@ import ( "sync" "time" - "github.com/containers/podman/v3/libpod/logs/reversereader" - "github.com/hpcloud/tail" + "github.com/containers/podman/v4/libpod/logs/reversereader" + "github.com/nxadm/tail" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -27,6 +27,9 @@ const ( // FullLogType signifies a log line is full FullLogType = "F" + + // ANSIEscapeResetCode is a code that resets all colors and text effects + ANSIEscapeResetCode 
= "\033[0m" ) // LogOptions is the options you can use for logs @@ -37,6 +40,7 @@ type LogOptions struct { Until time.Time Tail int64 Timestamps bool + Colors bool Multi bool WaitGroup *sync.WaitGroup UseName bool @@ -50,6 +54,7 @@ type LogLine struct { Msg string CID string CName string + ColorID int64 } // GetLogFile returns an hp tail for a container given options @@ -162,6 +167,24 @@ func getTailLog(path string, tail int) ([]*LogLine, error) { return tailLog, nil } +// getColor returns a ANSI escape code for color based on the colorID +func getColor(colorID int64) string { + colors := map[int64]string{ + 0: "\033[37m", // Light Gray + 1: "\033[31m", // Red + 2: "\033[33m", // Yellow + 3: "\033[34m", // Blue + 4: "\033[35m", // Magenta + 5: "\033[36m", // Cyan + 6: "\033[32m", // Green + } + return colors[colorID%int64(len(colors))] +} + +func (l *LogLine) colorize(prefix string) string { + return getColor(l.ColorID) + prefix + l.Msg + ANSIEscapeResetCode +} + // String converts a log line to a string for output given whether a detail // bool is specified. func (l *LogLine) String(options *LogOptions) string { @@ -177,10 +200,18 @@ func (l *LogLine) String(options *LogOptions) string { out = fmt.Sprintf("%s ", cid) } } + if options.Timestamps { out += fmt.Sprintf("%s ", l.Time.Format(LogTimeFormat)) } - return out + l.Msg + + if options.Colors { + out = l.colorize(out) + } else { + out += l.Msg + } + + return out } // Since returns a bool as to whether a log line occurred after a given time diff --git a/vendor/github.com/containers/podman/v3/libpod/logs/reversereader/reversereader.go b/vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/logs/reversereader/reversereader.go rename to vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go diff --git a/vendor/github.com/containers/podman/v3/libpod/mounts_linux.go b/vendor/github.com/containers/podman/v4/libpod/mounts_linux.go similarity index 96% rename from vendor/github.com/containers/podman/v3/libpod/mounts_linux.go rename to vendor/github.com/containers/podman/v4/libpod/mounts_linux.go index e6aa09eac2c..f6945b3a360 100644 --- a/vendor/github.com/containers/podman/v3/libpod/mounts_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/mounts_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod diff --git a/vendor/github.com/containers/podman/v3/libpod/networking_linux.go b/vendor/github.com/containers/podman/v4/libpod/networking_linux.go similarity index 57% rename from vendor/github.com/containers/podman/v3/libpod/networking_linux.go rename to vendor/github.com/containers/podman/v4/libpod/networking_linux.go index e792a410cc7..0c124cf0bf0 100644 --- a/vendor/github.com/containers/podman/v3/libpod/networking_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/networking_linux.go @@ -1,9 +1,11 @@ +//go:build linux // +build linux package libpod import ( "crypto/rand" + "crypto/sha256" "fmt" "io/ioutil" "net" @@ -11,22 +13,28 @@ import ( "os/exec" "path/filepath" "regexp" + "sort" "strconv" "strings" "syscall" "time" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/namespaces" - "github.com/containers/podman/v3/pkg/netns" - 
"github.com/containers/podman/v3/pkg/resolvconf" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/common/libnetwork/etchosts" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/machine" + "github.com/containers/common/pkg/netns" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/resolvconf" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v4/utils" "github.com/containers/storage/pkg/lockfile" + spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -41,116 +49,64 @@ const ( // default slirp4ns subnet defaultSlirp4netnsSubnet = "10.0.2.0/24" - // rootlessCNINSName is the file name for the rootless network namespace bind mount - rootlessCNINSName = "rootless-cni-ns" + // rootlessNetNsName is the file name for the rootless network namespace bind mount + rootlessNetNsName = "rootless-netns" + + // rootlessNetNsSilrp4netnsPidFile is the name of the rootless netns slirp4netns pid file + rootlessNetNsSilrp4netnsPidFile = "rootless-netns-slirp4netns.pid" // persistentCNIDir is the directory where the CNI files are stored persistentCNIDir = "/var/lib/cni" ) -// GetAllNetworkAliases returns all configured aliases for this container. -// It also adds the container short ID as alias to match docker. -func (c *Container) GetAllNetworkAliases() (map[string][]string, error) { - allAliases, err := c.runtime.state.GetAllNetworkAliases(c) - if err != nil { - return nil, err - } - - // get the all attached networks, we cannot use GetAllNetworkAliases() - // since it returns nil if there are no aliases - nets, _, err := c.networks() - if err != nil { - return nil, err +// convertPortMappings will remove the HostIP part from the ports when running inside podman machine. +// This is need because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. +// For machine the HostIP must only be used by gvproxy and never in the VM. +func (c *Container) convertPortMappings() []types.PortMapping { + if !machine.IsGvProxyBased() || len(c.config.PortMappings) == 0 { + return c.config.PortMappings } - - // add container short ID as alias to match docker - for _, net := range nets { - allAliases[net] = append(allAliases[net], c.config.ID[:12]) + // if we run in a machine VM we have to ignore the host IP part + newPorts := make([]types.PortMapping, 0, len(c.config.PortMappings)) + for _, port := range c.config.PortMappings { + port.HostIP = "" + newPorts = append(newPorts, port) } - return allAliases, nil -} - -// GetNetworkAliases returns configured aliases for this network. -// It also adds the container short ID as alias to match docker. 
-func (c *Container) GetNetworkAliases(netName string) ([]string, error) { - aliases, err := c.runtime.state.GetNetworkAliases(c, netName) - if err != nil { - return nil, err - } - - // add container short ID as alias to match docker - aliases = append(aliases, c.config.ID[:12]) - return aliases, nil + return newPorts } -func (c *Container) getNetworkOptions() (types.NetworkOptions, error) { +func (c *Container) getNetworkOptions(networkOpts map[string]types.PerNetworkOptions) types.NetworkOptions { opts := types.NetworkOptions{ ContainerID: c.config.ID, ContainerName: getCNIPodName(c), } - // TODO remove ocicni PortMappings from container config and store as types PortMappings - if len(c.config.PortMappings) > 0 { - opts.PortMappings = ocicniPortsToNetTypesPorts(c.config.PortMappings) - } - networks, _, err := c.networks() - if err != nil { - return opts, err - } - aliases, err := c.GetAllNetworkAliases() - if err != nil { - return opts, err - } + opts.PortMappings = c.convertPortMappings() // If the container requested special network options use this instead of the config. // This is the case for container restore or network reload. if c.perNetworkOpts != nil { opts.Networks = c.perNetworkOpts - return opts, nil - } - - // Update container map of interface descriptions - if err := c.setupNetworkDescriptions(networks); err != nil { - return opts, err - } - - nets := make(map[string]types.PerNetworkOptions, len(networks)) - for i, network := range networks { - eth, exists := c.state.NetInterfaceDescriptions.getInterfaceByName(network) - if !exists { - return opts, errors.Errorf("no network interface name for container %s on network %s", c.config.ID, network) - } - netOpts := types.PerNetworkOptions{ - InterfaceName: eth, - Aliases: aliases[network], - } - // only set the static ip/mac on the first network - if i == 0 { - if c.config.StaticIP != nil { - netOpts.StaticIPs = []net.IP{c.config.StaticIP} - } - netOpts.StaticMAC = c.config.StaticMAC - } - nets[network] = netOpts + } else { + opts.Networks = networkOpts } - opts.Networks = nets - return opts, nil + return opts } -type RootlessCNI struct { +type RootlessNetNS struct { ns ns.NetNS dir string Lock lockfile.Locker } -// getPath will join the given path to the rootless cni dir -func (r *RootlessCNI) getPath(path string) string { +// getPath will join the given path to the rootless netns dir +func (r *RootlessNetNS) getPath(path string) string { return filepath.Join(r.dir, path) } -// Do - run the given function in the rootless cni ns. +// Do - run the given function in the rootless netns. // It does not lock the rootlessCNI lock, the caller // should only lock when needed, e.g. for cni operations. -func (r *RootlessCNI) Do(toRun func() error) error { +func (r *RootlessNetNS) Do(toRun func() error) error { err := r.ns.Do(func(_ ns.NetNS) error { // Before we can run the given function, // we have to setup all mounts correctly. @@ -161,11 +117,11 @@ func (r *RootlessCNI) Do(toRun func() error) error { // Because the plugins also need access to XDG_RUNTIME_DIR/netns some special setup is needed. // The following bind mounts are needed - // 1. XDG_RUNTIME_DIR/netns -> XDG_RUNTIME_DIR/rootless-cni/XDG_RUNTIME_DIR/netns - // 2. /run/systemd -> XDG_RUNTIME_DIR/rootless-cni/run/systemd (only if it exists) - // 3. XDG_RUNTIME_DIR/rootless-cni/resolv.conf -> /etc/resolv.conf or XDG_RUNTIME_DIR/rootless-cni/run/symlink/target - // 4. 
XDG_RUNTIME_DIR/rootless-cni/var/lib/cni -> /var/lib/cni (if /var/lib/cni does not exists use the parent dir) - // 5. XDG_RUNTIME_DIR/rootless-cni/run -> /run + // 1. XDG_RUNTIME_DIR -> XDG_RUNTIME_DIR/rootless-netns/XDG_RUNTIME_DIR + // 2. /run/systemd -> XDG_RUNTIME_DIR/rootless-netns/run/systemd (only if it exists) + // 3. XDG_RUNTIME_DIR/rootless-netns/resolv.conf -> /etc/resolv.conf or XDG_RUNTIME_DIR/rootless-netns/run/symlink/target + // 4. XDG_RUNTIME_DIR/rootless-netns/var/lib/cni -> /var/lib/cni (if /var/lib/cni does not exists use the parent dir) + // 5. XDG_RUNTIME_DIR/rootless-netns/run -> /run // Create a new mount namespace, // this must happen inside the netns thread. @@ -174,16 +130,16 @@ func (r *RootlessCNI) Do(toRun func() error) error { return errors.Wrapf(err, "cannot create a new mount namespace") } - netNsDir, err := netns.GetNSRunDir() + xdgRuntimeDir, err := util.GetRuntimeDir() if err != nil { - return errors.Wrap(err, "could not get network namespace directory") + return errors.Wrap(err, "could not get runtime directory") } - newNetNsDir := r.getPath(netNsDir) + newXDGRuntimeDir := r.getPath(xdgRuntimeDir) // 1. Mount the netns into the new run to keep them accessible. // Otherwise cni setup will fail because it cannot access the netns files. - err = unix.Mount(netNsDir, newNetNsDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "") + err = unix.Mount(xdgRuntimeDir, newXDGRuntimeDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "") if err != nil { - return errors.Wrap(err, "failed to mount netns directory for rootless cni") + return errors.Wrap(err, "failed to mount runtime directory for rootless netns") } // 2. Also keep /run/systemd if it exists. @@ -194,7 +150,7 @@ func (r *RootlessCNI) Do(toRun func() error) error { newRunSystemd := r.getPath(runSystemd) err = unix.Mount(runSystemd, newRunSystemd, "none", unix.MS_BIND|unix.MS_REC, "") if err != nil { - return errors.Wrap(err, "failed to mount /run/systemd directory for rootless cni") + return errors.Wrap(err, "failed to mount /run/systemd directory for rootless netns") } } @@ -204,29 +160,62 @@ func (r *RootlessCNI) Do(toRun func() error) error { // the link target will be available in the mount ns. // see: https://github.com/containers/podman/issues/10855 resolvePath := "/etc/resolv.conf" - for i := 0; i < 255; i++ { + linkCount := 0 + for i := 1; i < len(resolvePath); i++ { // Do not use filepath.EvalSymlinks, we only want the first symlink under /run. // If /etc/resolv.conf has more than one symlink under /run, e.g. // -> /run/systemd/resolve/stub-resolv.conf -> /run/systemd/resolve/resolv.conf // we would put the netns resolv.conf file to the last path. However this will // break dns because the second link does not exists in the mount ns. // see https://github.com/containers/podman/issues/11222 - link, err := os.Readlink(resolvePath) + // + // We also need to resolve all path components not just the last file. 
+ // see https://github.com/containers/podman/issues/12461 + + if resolvePath[i] != '/' { + // if we are at the last char we need to inc i by one because there is no final slash + if i == len(resolvePath)-1 { + i++ + } else { + // not the end of path, keep going + continue + } + } + path := resolvePath[:i] + + fi, err := os.Lstat(path) if err != nil { - // if there is no symlink exit - break + return errors.Wrap(err, "failed to stat resolv.conf path") } + + // no link, just continue + if fi.Mode()&os.ModeSymlink == 0 { + continue + } + + link, err := os.Readlink(path) + if err != nil { + return errors.Wrap(err, "failed to read resolv.conf symlink") + } + linkCount++ if filepath.IsAbs(link) { // link is an absolute path - resolvePath = link + resolvePath = filepath.Join(link, resolvePath[i:]) } else { // link is a relative path, join it with the previous path - resolvePath = filepath.Join(filepath.Dir(resolvePath), link) + base := filepath.Dir(path) + resolvePath = filepath.Join(base, link, resolvePath[i:]) } + // set i back to zero since we now have a new base path + i = 0 + + // we have to stop at the first path under /run because we will have an empty /run and will create the path anyway + // if we would continue we would need to recreate all links under /run if strings.HasPrefix(resolvePath, "/run/") { break } - if i == 254 { + // make sure we do not loop forever + if linkCount == 255 { return errors.New("too many symlinks while resolving /etc/resolv.conf") } } @@ -242,25 +231,25 @@ func (r *RootlessCNI) Do(toRun func() error) error { rsr := r.getPath("/run/systemd/resolve") err = unix.Mount("", rsr, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, "") if err != nil { - return errors.Wrapf(err, "failed to mount tmpfs on %q for rootless cni", rsr) + return errors.Wrapf(err, "failed to mount tmpfs on %q for rootless netns", rsr) } } if strings.HasPrefix(resolvePath, "/run/") { resolvePath = r.getPath(resolvePath) err = os.MkdirAll(filepath.Dir(resolvePath), 0700) if err != nil { - return errors.Wrap(err, "failed to create rootless-cni resolv.conf directory") + return errors.Wrap(err, "failed to create rootless-netns resolv.conf directory") } // we want to bind mount on this file so we have to create the file first _, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0700) if err != nil { - return errors.Wrap(err, "failed to create rootless-cni resolv.conf file") + return errors.Wrap(err, "failed to create rootless-netns resolv.conf file") } } // mount resolv.conf to make use of the host dns err = unix.Mount(r.getPath("resolv.conf"), resolvePath, "none", unix.MS_BIND, "") if err != nil { - return errors.Wrap(err, "failed to mount resolv.conf for rootless cni") + return errors.Wrap(err, "failed to mount resolv.conf for rootless netns") } // 4. CNI plugins need access to /var/lib/cni and /run @@ -285,14 +274,14 @@ func (r *RootlessCNI) Do(toRun func() error) error { // make sure to mount var first err = unix.Mount(varDir, varTarget, "none", unix.MS_BIND, "") if err != nil { - return errors.Wrapf(err, "failed to mount %s for rootless cni", varTarget) + return errors.Wrapf(err, "failed to mount %s for rootless netns", varTarget) } // 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts.
runDir := r.getPath("run") err = unix.Mount(runDir, "/run", "none", unix.MS_BIND|unix.MS_REC, "") if err != nil { - return errors.Wrap(err, "failed to mount /run for rootless cni") + return errors.Wrap(err, "failed to mount /run for rootless netns") } // run the given function in the correct namespace @@ -302,122 +291,133 @@ return err } -// Cleanup the rootless cni namespace if needed. +// Cleanup the rootless network namespace if needed. // It checks if we have running containers with the bridge network mode. -// Cleanup() will try to lock RootlessCNI, therefore you have to call it with an unlocked -func (r *RootlessCNI) Cleanup(runtime *Runtime) error { +// Cleanup() expects that r.Lock is locked +func (r *RootlessNetNS) Cleanup(runtime *Runtime) error { _, err := os.Stat(r.dir) if os.IsNotExist(err) { // the directory does not exist, no need for cleanup return nil } - r.Lock.Lock() - defer r.Lock.Unlock() - running := func(c *Container) bool { + activeNetns := func(c *Container) bool { + // no bridge => no need to check + if !c.config.NetMode.IsBridge() { + return false + } + // we cannot use c.state() because it will try to lock the container - // using c.state.State directly should be good enough for this use case - state := c.state.State - return state == define.ContainerStateRunning + // locking is a problem because cleanup is called after net teardown + // at this stage the container is already locked. + // also do not try to lock only containers which are not currently in net + // teardown because this will result in an ABBA deadlock between the rootless + // cni lock and the container lock + // because we need to get the state we have to sync, otherwise this will not + // work because the state is empty by default + // I do not like this but I do not see a better way at the moment + err := c.syncContainer() + if err != nil { + return false + } + + // only check for an active netns, we cannot use the container state + // because not running does not mean that the netns does not need cleanup + // only if the netns is empty we know that we do not need cleanup + return c.state.NetNS != nil } - ctrs, err := runtime.GetContainersWithoutLock(running) + ctrs, err := runtime.GetContainers(activeNetns) if err != nil { return err } - cleanup := true - for _, ctr := range ctrs { - if ctr.config.NetMode.IsBridge() { - cleanup = false - } + // no cleanup if we found no other containers with a netns + // we will always find one container (the container cleanup that is currently calling us) + if len(ctrs) > 1 { + return nil } - if cleanup { - // make sure the the cni results (cache) dir is empty - // libpod instances with another root dir are not covered by the check above - // this allows several libpod instances to use the same rootless cni ns - contents, err := ioutil.ReadDir(r.getPath("var/lib/cni/results")) - if (err == nil && len(contents) == 0) || os.IsNotExist(err) { - logrus.Debug("Cleaning up rootless cni namespace") - err = netns.UnmountNS(r.ns) - if err != nil { - return err - } - // make the following errors not fatal - err = r.ns.Close() - if err != nil { - logrus.Error(err) - } - b, err := ioutil.ReadFile(r.getPath("rootless-cni-slirp4netns.pid")) - if err == nil { - var i int - i, err = strconv.Atoi(string(b)) - if err == nil { - // kill the slirp process so we do not leak it - err = syscall.Kill(i, syscall.SIGTERM) - } - } - if err != nil { - logrus.Errorf("Failed to kill slirp4netns process: %s", err) - } - err =
os.RemoveAll(r.dir) - if err != nil { - logrus.Error(err) - } - } else if err != nil && !os.IsNotExist(err) { - logrus.Errorf("Could not read rootless cni directory, skipping cleanup: %s", err) + logrus.Debug("Cleaning up rootless network namespace") + err = netns.UnmountNS(r.ns) + if err != nil { + return err + } + // make the following errors not fatal + err = r.ns.Close() + if err != nil { + logrus.Error(err) + } + b, err := ioutil.ReadFile(r.getPath(rootlessNetNsSilrp4netnsPidFile)) + if err == nil { + var i int + i, err = strconv.Atoi(string(b)) + if err == nil { + // kill the slirp process so we do not leak it + err = syscall.Kill(i, syscall.SIGTERM) } } + if err != nil { + logrus.Errorf("Failed to kill slirp4netns process: %s", err) + } + err = os.RemoveAll(r.dir) + if err != nil { + logrus.Error(err) + } return nil } -// GetRootlessCNINetNs returns the rootless cni object. If create is set to true -// the rootless cni namespace will be created if it does not exists already. +// GetRootlessNetNs returns the rootless netns object. If create is set to true +// the rootless network namespace will be created if it does not exists already. // If called as root it returns always nil. // On success the returned RootlessCNI lock is locked and must be unlocked by the caller. -func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) { +func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { if !rootless.IsRootless() { return nil, nil } - var rootlessCNINS *RootlessCNI - runDir, err := util.GetRuntimeDir() - if err != nil { - return nil, err - } + var rootlessNetNS *RootlessNetNS + runDir := r.config.Engine.TmpDir - lfile := filepath.Join(runDir, "rootless-cni.lock") + lfile := filepath.Join(runDir, "rootless-netns.lock") lock, err := lockfile.GetLockfile(lfile) if err != nil { - return nil, errors.Wrap(err, "failed to get rootless-cni lockfile") + return nil, errors.Wrap(err, "failed to get rootless-netns lockfile") } lock.Lock() defer func() { - // In case of an error (early exit) rootlessCNINS will be nil. + // In case of an error (early exit) rootlessNetNS will be nil. // Make sure to unlock otherwise we could deadlock. 
- if rootlessCNINS == nil { + if rootlessNetNS == nil { lock.Unlock() } }() - cniDir := filepath.Join(runDir, "rootless-cni") - err = os.MkdirAll(cniDir, 0700) + rootlessNetNsDir := filepath.Join(runDir, rootlessNetNsName) + err = os.MkdirAll(rootlessNetNsDir, 0700) if err != nil { - return nil, errors.Wrap(err, "could not create rootless-cni directory") + return nil, errors.Wrap(err, "could not create rootless-netns directory") } nsDir, err := netns.GetNSRunDir() if err != nil { return nil, err } - path := filepath.Join(nsDir, rootlessCNINSName) + + // create a hash from the static dir + // the cleanup will check if there are running containers + // if you run several libpod instances with different root/runroot directories this check will fail + // we want one netns for each libpod static dir so we use the hash to prevent name collisions + hash := sha256.Sum256([]byte(r.config.Engine.StaticDir)) + netnsName := fmt.Sprintf("%s-%x", rootlessNetNsName, hash[:10]) + + path := filepath.Join(nsDir, netnsName) ns, err := ns.GetNS(path) if err != nil { if !new { // return an error if we could not get the namespace and should not create one - return nil, errors.Wrap(err, "error getting rootless cni network namespace") + return nil, errors.Wrap(err, "error getting rootless network namespace") } // create a new namespace - logrus.Debug("creating rootless cni network namespace") - ns, err = netns.NewNSWithName(rootlessCNINSName) + logrus.Debugf("creating rootless network namespace with name %q", netnsName) + ns, err = netns.NewNSWithName(netnsName) if err != nil { - return nil, errors.Wrap(err, "error creating rootless cni network namespace") + return nil, errors.Wrap(err, "error creating rootless network namespace") } // setup slirp4netns here path := r.config.Engine.NetworkCmdPath @@ -467,7 +467,7 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) { // Leak one end of the pipe in slirp4netns cmd.ExtraFiles = append(cmd.ExtraFiles, syncW) - logPath := filepath.Join(r.config.Engine.TmpDir, "slirp4netns-rootless-cni.log") + logPath := filepath.Join(r.config.Engine.TmpDir, "slirp4netns-rootless-netns.log") logFile, err := os.Create(logPath) if err != nil { return nil, errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath) @@ -486,9 +486,9 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) { // create pid file for the slirp4netns process // this is needed to kill the process in the cleanup pid := strconv.Itoa(cmd.Process.Pid) - err = ioutil.WriteFile(filepath.Join(cniDir, "rootless-cni-slirp4netns.pid"), []byte(pid), 0700) + err = ioutil.WriteFile(filepath.Join(rootlessNetNsDir, rootlessNetNsSilrp4netnsPidFile), []byte(pid), 0700) if err != nil { - errors.Wrap(err, "unable to write rootless-cni slirp4netns pid file") + return nil, errors.Wrap(err, "unable to write rootless-netns slirp4netns pid file") } defer func() { @@ -501,6 +501,15 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) { return nil, err } + if utils.RunsOnSystemd() { + // move to systemd scope to prevent systemd from killing it + err = utils.MoveRootlessNetnsSlirpProcessToUserSlice(cmd.Process.Pid) + if err != nil { + // only log this, it is not fatal but can lead to issues when running podman inside systemd units + logrus.Errorf("failed to move the rootless netns slirp4netns process to the systemd user.slice: %v", err) + } + } + // build a new resolv.conf file which uses the slirp4netns dns server address resolveIP, err := GetSlirp4netnsDNS(nil) if err !=
nil { @@ -529,88 +538,65 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) { dnsOptions := resolvconf.GetOptions(conf.Content) nameServers := resolvconf.GetNameservers(conf.Content) - _, err = resolvconf.Build(filepath.Join(cniDir, "resolv.conf"), append([]string{resolveIP.String()}, nameServers...), searchDomains, dnsOptions) + _, err = resolvconf.Build(filepath.Join(rootlessNetNsDir, "resolv.conf"), append([]string{resolveIP.String()}, nameServers...), searchDomains, dnsOptions) if err != nil { - return nil, errors.Wrap(err, "failed to create rootless cni resolv.conf") + return nil, errors.Wrap(err, "failed to create rootless netns resolv.conf") } // create cni directories to store files // they will be bind mounted to the correct location in an extra mount ns - err = os.MkdirAll(filepath.Join(cniDir, strings.TrimPrefix(persistentCNIDir, "/")), 0700) + err = os.MkdirAll(filepath.Join(rootlessNetNsDir, persistentCNIDir), 0700) if err != nil { - return nil, errors.Wrap(err, "could not create rootless-cni var directory") + return nil, errors.Wrap(err, "could not create rootless-netns var directory") } - runDir := filepath.Join(cniDir, "run") + runDir := filepath.Join(rootlessNetNsDir, "run") err = os.MkdirAll(runDir, 0700) if err != nil { - return nil, errors.Wrap(err, "could not create rootless-cni run directory") + return nil, errors.Wrap(err, "could not create rootless-netns run directory") } // relabel the new run directory to the iptables /run label // this is important, otherwise the iptables command will fail err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false) if err != nil { - return nil, errors.Wrap(err, "could not create relabel rootless-cni run directory") + return nil, errors.Wrap(err, "could not relabel rootless-netns run directory") } // create systemd run directory err = os.MkdirAll(filepath.Join(runDir, "systemd"), 0700) if err != nil { - return nil, errors.Wrap(err, "could not create rootless-cni systemd directory") + return nil, errors.Wrap(err, "could not create rootless-netns systemd directory") } // create the directory for the netns files at the same location - // relative to the rootless-cni location - err = os.MkdirAll(filepath.Join(cniDir, nsDir), 0700) + // relative to the rootless-netns location + err = os.MkdirAll(filepath.Join(rootlessNetNsDir, nsDir), 0700) if err != nil { - return nil, errors.Wrap(err, "could not create rootless-cni netns directory") + return nil, errors.Wrap(err, "could not create rootless-netns netns directory") } } - // The CNI plugins need access to iptables in $PATH. As it turns out debian doesn't put - // /usr/sbin in $PATH for rootless users. This will break rootless cni completely. + // The CNI plugins and netavark need access to iptables in $PATH. As it turns out debian doesn't put - // /usr/sbin in $PATH for rootless users. This will break rootless networking completely. // We might break existing users and we cannot expect everyone to change their $PATH so // let's add /usr/sbin to $PATH ourselves. path = os.Getenv("PATH") if !strings.Contains(path, "/usr/sbin") { - path = path + ":/usr/sbin" + path += ":/usr/sbin" os.Setenv("PATH", path) } - // Important set rootlessCNINS as last step. + // Important: set rootlessNetNS as last step. // Do not return any errors after this.
- rootlessCNINS = &RootlessCNI{ + rootlessNetNS = &RootlessNetNS{ ns: ns, - dir: cniDir, + dir: rootlessNetNsDir, Lock: lock, } - return rootlessCNINS, nil -} - -// setPrimaryMachineIP is used for podman-machine and it sets -// and environment variable with the IP address of the podman-machine -// host. -func setPrimaryMachineIP() error { - // no connection is actually made here - conn, err := net.Dial("udp", "8.8.8.8:80") - if err != nil { - return err - } - defer func() { - if err := conn.Close(); err != nil { - logrus.Error(err) - } - }() - addr := conn.LocalAddr().(*net.UDPAddr) - return os.Setenv("PODMAN_MACHINE_HOST", addr.IP.String()) + return rootlessNetNS, nil } // setUpNetwork will set up the networks, on error it will also tear down the cni -// networks. If rootless it will join/create the rootless cni namespace. +// networks. If rootless it will join/create the rootless network namespace. func (r *Runtime) setUpNetwork(ns string, opts types.NetworkOptions) (map[string]types.StatusBlock, error) { - if r.config.MachineEnabled() { - if err := setPrimaryMachineIP(); err != nil { - return nil, err - } - } - rootlessCNINS, err := r.GetRootlessCNINetNs(true) + rootlessNetNS, err := r.GetRootlessNetNs(true) if err != nil { return nil, err } @@ -619,11 +605,11 @@ func (r *Runtime) setUpNetwork(ns string, opts types.NetworkOptions) (map[string results, err = r.network.Setup(ns, types.SetupOptions{NetworkOptions: opts}) return err } - // rootlessCNINS is nil if we are root - if rootlessCNINS != nil { - // execute the cni setup in the rootless net ns - err = rootlessCNINS.Do(setUpPod) - rootlessCNINS.Lock.Unlock() + // rootlessNetNS is nil if we are root + if rootlessNetNS != nil { + // execute the setup in the rootless net ns + err = rootlessNetNS.Do(setUpPod) + rootlessNetNS.Lock.Unlock() } else { err = setUpPod() } @@ -643,8 +629,22 @@ func getCNIPodName(c *Container) string { } // Create and configure a new network namespace for a container -func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (map[string]types.StatusBlock, error) { - networks, _, err := ctr.networks() +func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[string]types.StatusBlock, rerr error) { + if err := r.exposeMachinePorts(ctr.config.PortMappings); err != nil { + return nil, err + } + defer func() { + // make sure to unexpose the gvproxy ports when an error happens + if rerr != nil { + if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { + logrus.Errorf("failed to free gvproxy machine ports: %v", err) + } + } + }() + if ctr.config.NetMode.IsSlirp4netns() { + return nil, r.setupSlirp4netns(ctr, ctrNS) + } + networks, err := ctr.networks() if err != nil { return nil, err } @@ -654,11 +654,25 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (map[string]typ return nil, nil } - netOpts, err := ctr.getNetworkOptions() + netOpts := ctr.getNetworkOptions(networks) + netStatus, err := r.setUpNetwork(ctrNS.Path(), netOpts) if err != nil { return nil, err } - return r.setUpNetwork(ctrNS.Path(), netOpts) + + // setup rootless port forwarder when rootless with ports and the network status is empty, + // if this is called from network reload the network status will not be empty and we should + // not set up ports because they are still active + if rootless.IsRootless() && len(ctr.config.PortMappings) > 0 && ctr.getNetworkStatus() == nil { + // set up port forwarder for rootless netns + netnsPath := ctrNS.Path() + // TODO: support slirp4netns port
forwarder as well + // make sure to fix this in container.handleRestartPolicy() as well + // Important: we have to call this after r.setUpNetwork() so that + // we can use the proper netStatus + err = r.setupRootlessPortMappingViaRLK(ctr, netnsPath, netStatus) + } + return netStatus, err } // Create and configure a new network namespace for a container @@ -681,31 +695,10 @@ func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.St logrus.Debugf("Made network namespace at %s for container %s", ctrNS.Path(), ctr.ID()) var networkStatus map[string]types.StatusBlock - if !ctr.config.NetMode.IsSlirp4netns() { - networkStatus, err = r.configureNetNS(ctr, ctrNS) - } + networkStatus, err = r.configureNetNS(ctr, ctrNS) return ctrNS, networkStatus, err } -// Configure the network namespace for a rootless container -func (r *Runtime) setupRootlessNetNS(ctr *Container) error { - if ctr.config.NetMode.IsSlirp4netns() { - return r.setupSlirp4netns(ctr) - } - networks, _, err := ctr.networks() - if err != nil { - return err - } - if len(networks) > 0 && len(ctr.config.PortMappings) > 0 { - // set up port forwarder for CNI-in-slirp4netns - netnsPath := ctr.state.NetNS.Path() - // TODO: support slirp4netns port forwarder as well - // make sure to fix this container.handleRestartPolicy() as well - return r.setupRootlessPortMappingViaRLK(ctr, netnsPath) - } - return nil -} - // Configure the network namespace using the container process func (r *Runtime) setupNetNS(ctr *Container) error { nsProcess := fmt.Sprintf("/proc/%d/ns/net", ctr.state.PID) @@ -719,7 +712,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error { if err != nil { return err } - nsPath = filepath.Join(nsPath, fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])) + nsPath = filepath.Join(nsPath, fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])) if err := os.MkdirAll(filepath.Dir(nsPath), 0711); err != nil { return err @@ -777,10 +770,10 @@ func (r *Runtime) closeNetNS(ctr *Container) error { return nil } -// Tear down a container's CNI network configuration and joins the +// Tear down a container's network configuration and join the // rootless net ns as rootless user func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error { - rootlessCNINS, err := r.GetRootlessCNINetNs(false) + rootlessNetNS, err := r.GetRootlessNetNs(false) if err != nil { return err } @@ -789,14 +782,14 @@ func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error { return errors.Wrapf(err, "error tearing down network namespace configuration for container %s", opts.ContainerID) } - // rootlessCNINS is nil if we are root - if rootlessCNINS != nil { + // rootlessNetNS is nil if we are root + if rootlessNetNS != nil { // execute the cni teardown in the rootless net ns - err = rootlessCNINS.Do(tearDownPod) - rootlessCNINS.Lock.Unlock() - if err == nil { - err = rootlessCNINS.Cleanup(r) + err = rootlessNetNS.Do(tearDownPod) + if cerr := rootlessNetNS.Cleanup(r); cerr != nil { + logrus.WithError(cerr).Error("failed to cleanup rootless netns") } + rootlessNetNS.Lock.Unlock() } else { err = tearDownPod() } @@ -813,16 +806,13 @@ func (r *Runtime) teardownCNI(ctr *Container) error { logrus.Debugf("Tearing down network namespace at %s for container %s", ctr.state.NetNS.Path(), ctr.ID()) - networks, _, err := ctr.networks() + networks, err := ctr.networks() if err != nil { return err } if !ctr.config.NetMode.IsSlirp4netns() && len(networks) > 0 { - netOpts, err :=
ctr.getNetworkOptions() - if err != nil { - return err - } + netOpts := ctr.getNetworkOptions(networks) return r.teardownNetwork(ctr.state.NetNS.Path(), netOpts) } return nil @@ -830,6 +820,10 @@ func (r *Runtime) teardownCNI(ctr *Container) error { // Tear down a network namespace, undoing all state associated with it. func (r *Runtime) teardownNetNS(ctr *Container) error { + if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { + // do not return an error otherwise we would prevent network cleanup + logrus.Errorf("failed to free gvproxy machine ports: %v", err) + } if err := r.teardownCNI(ctr); err != nil { return err } @@ -849,21 +843,25 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { return nil } -func getContainerNetNS(ctr *Container) (string, error) { +func getContainerNetNS(ctr *Container) (string, *Container, error) { if ctr.state.NetNS != nil { - return ctr.state.NetNS.Path(), nil + return ctr.state.NetNS.Path(), nil, nil } if ctr.config.NetNsCtr != "" { c, err := ctr.runtime.GetContainer(ctr.config.NetNsCtr) if err != nil { - return "", err + return "", nil, err } if err = c.syncContainer(); err != nil { - return "", err + return "", c, err + } + netNs, c2, err := getContainerNetNS(c) + if c2 != nil { + c = c2 } - return getContainerNetNS(c) + return netNs, c, err } - return "", nil + return "", nil, nil } // isBridgeNetMode checks if the given network mode is bridge. @@ -907,51 +905,35 @@ func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.Statu } } - aliases, err := ctr.GetAllNetworkAliases() + networkOpts, err := ctr.networks() if err != nil { return nil, err } // Set the same network settings as before.. netStatus := ctr.getNetworkStatus() - netOpts := make(map[string]types.PerNetworkOptions, len(netStatus)) - for network, status := range netStatus { - perNetOpts := types.PerNetworkOptions{} - for name, netInt := range status.Interfaces { - perNetOpts = types.PerNetworkOptions{ - InterfaceName: name, - Aliases: aliases[network], - StaticMAC: netInt.MacAddress, - } - for _, netAddress := range netInt.Networks { - perNetOpts.StaticIPs = append(perNetOpts.StaticIPs, netAddress.Subnet.IP) + for network, perNetOpts := range networkOpts { + for name, netInt := range netStatus[network].Interfaces { + perNetOpts.InterfaceName = name + perNetOpts.StaticMAC = netInt.MacAddress + for _, netAddress := range netInt.Subnets { + perNetOpts.StaticIPs = append(perNetOpts.StaticIPs, netAddress.IPNet.IP) } // Normally interfaces have a length of 1, only for some special cni configs we could get more. // For now just use the first interface to get the ips this should be good enough for most cases. break } - if perNetOpts.InterfaceName == "" { - eth, exists := ctr.state.NetInterfaceDescriptions.getInterfaceByName(network) - if !exists { - return nil, errors.Errorf("no network interface name for container %s on network %s", ctr.config.ID, network) - } - perNetOpts.InterfaceName = eth - } - netOpts[network] = perNetOpts + networkOpts[network] = perNetOpts } - ctr.perNetworkOpts = netOpts + ctr.perNetworkOpts = networkOpts return r.configureNetNS(ctr, ctr.state.NetNS) } func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { var netStats *netlink.LinkStatistics - // With slirp4netns, we can't collect statistics at present. 
- // For now, we allow stats to at least run by returning nil - if rootless.IsRootless() || ctr.config.NetMode.IsSlirp4netns() { - return netStats, nil - } - netNSPath, netPathErr := getContainerNetNS(ctr) + + netNSPath, otherCtr, netPathErr := getContainerNetNS(ctr) if netPathErr != nil { return nil, netPathErr } @@ -960,9 +942,18 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { // this is a valid state and thus return no error, nor any statistics return nil, nil } + + // FIXME get the interface from the container netstatus + dev := "eth0" + netMode := ctr.config.NetMode + if otherCtr != nil { + netMode = otherCtr.config.NetMode + } + if netMode.IsSlirp4netns() { + dev = "tap0" + } err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error { - // FIXME get the interface from the container netstatus - link, err := netlink.LinkByName("eth0") + link, err := netlink.LinkByName(dev) if err != nil { return err } @@ -996,25 +987,31 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e settings := new(define.InspectNetworkSettings) settings.Ports = makeInspectPortBindings(c.config.PortMappings, c.config.ExposedPorts) - networks, isDefault, err := c.networks() + networks, err := c.networks() if err != nil { return nil, err } - // We can't do more if the network is down. if c.state.NetNS == nil { + if networkNSPath := c.joinedNetworkNSPath(); networkNSPath != "" { + if result, err := c.inspectJoinedNetworkNS(networkNSPath); err == nil { + // fallback to dummy configuration + settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) + return settings, nil + } + // do not propagate error inspecting a joined network ns + logrus.Errorf("Inspecting network namespace: %s of container %s: %v", networkNSPath, c.ID(), err) + } + // We can't do more if the network is down. + // We still want to make dummy configurations for each CNI net // the container joined. 
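+ // (these dummy entries carry only the network name and its aliases; no addresses are known without a netns)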
if len(networks) > 0 { settings.Networks = make(map[string]*define.InspectAdditionalNetwork, len(networks)) - for _, net := range networks { + for net, opts := range networks { cniNet := new(define.InspectAdditionalNetwork) cniNet.NetworkID = net - aliases, err := c.GetNetworkAliases(net) - if err != nil { - return nil, err - } - cniNet.Aliases = aliases + cniNet.Aliases = opts.Aliases settings.Networks[net] = cniNet } } @@ -1039,28 +1036,20 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e settings.Networks = make(map[string]*define.InspectAdditionalNetwork) - for _, name := range networks { + for name, opts := range networks { result := netStatus[name] addedNet := new(define.InspectAdditionalNetwork) addedNet.NetworkID = name - - basicConfig, err := resultToBasicNetworkConfig(result) - if err != nil { - return nil, err - } - - aliases, err := c.GetNetworkAliases(name) - if err != nil { - return nil, err - } - addedNet.Aliases = aliases - - addedNet.InspectBasicNetworkConfig = basicConfig + addedNet.Aliases = opts.Aliases + addedNet.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) settings.Networks[name] = addedNet } - if !isDefault { + // if not only the default network is connected we can return here + // otherwise we have to populate the InspectBasicNetworkConfig settings + _, isDefaultNet := networks[c.runtime.config.Network.DefaultNetwork] + if !(len(networks) == 1 && isDefaultNet) { return settings, nil } } @@ -1072,63 +1061,109 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e if len(netStatus) == 1 { for _, status := range netStatus { - basicConfig, err := resultToBasicNetworkConfig(status) - if err != nil { - return nil, err - } - settings.InspectBasicNetworkConfig = basicConfig + settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(status) } } return settings, nil } -// setupNetworkDescriptions adds networks and eth values to the container's -// network descriptions -func (c *Container) setupNetworkDescriptions(networks []string) error { - // if the map is nil and we have networks - if c.state.NetInterfaceDescriptions == nil && len(networks) > 0 { - c.state.NetInterfaceDescriptions = make(ContainerNetworkDescriptions) - } - origLen := len(c.state.NetInterfaceDescriptions) - for _, n := range networks { - // if the network is not in the map, add it - if _, exists := c.state.NetInterfaceDescriptions[n]; !exists { - c.state.NetInterfaceDescriptions.add(n) +func (c *Container) joinedNetworkNSPath() string { + for _, namespace := range c.config.Spec.Linux.Namespaces { + if namespace.Type == spec.NetworkNamespace { + return namespace.Path } } - // if the map changed, we need to save the container state - if origLen != len(c.state.NetInterfaceDescriptions) { - if err := c.save(); err != nil { + return "" +} + +func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBlock, retErr error) { + var result types.StatusBlock + err := ns.WithNetNSPath(networkns, func(_ ns.NetNS) error { + ifaces, err := net.Interfaces() + if err != nil { return err } - } - return nil + routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL) + if err != nil { + return err + } + var gateway net.IP + for _, route := range routes { + // default gateway + if route.Dst == nil { + gateway = route.Gw + } + } + result.Interfaces = make(map[string]types.NetInterface) + for _, iface := range ifaces { + if iface.Flags&net.FlagLoopback != 0 { + continue + } + addrs, err := iface.Addrs() + if err != nil { + 
continue + } + if len(addrs) == 0 { + continue + } + subnets := make([]types.NetAddress, 0, len(addrs)) + for _, address := range addrs { + if ipnet, ok := address.(*net.IPNet); ok { + if ipnet.IP.IsLinkLocalMulticast() || ipnet.IP.IsLinkLocalUnicast() { + continue + } + subnet := types.NetAddress{ + IPNet: types.IPNet{ + IPNet: *ipnet, + }, + } + if ipnet.Contains(gateway) { + subnet.Gateway = gateway + } + subnets = append(subnets, subnet) + } + } + result.Interfaces[iface.Name] = types.NetInterface{ + Subnets: subnets, + MacAddress: types.HardwareAddr(iface.HardwareAddr), + } + } + return nil + }) + return result, err } // resultToBasicNetworkConfig produces an InspectBasicNetworkConfig from a CNI // result -func resultToBasicNetworkConfig(result types.StatusBlock) (define.InspectBasicNetworkConfig, error) { +func resultToBasicNetworkConfig(result types.StatusBlock) define.InspectBasicNetworkConfig { config := define.InspectBasicNetworkConfig{} - for _, netInt := range result.Interfaces { - for _, netAddress := range netInt.Networks { - size, _ := netAddress.Subnet.Mask.Size() - if netAddress.Subnet.IP.To4() != nil { - //ipv4 + interfaceNames := make([]string, 0, len(result.Interfaces)) + for interfaceName := range result.Interfaces { + interfaceNames = append(interfaceNames, interfaceName) + } + // ensure consistent inspect results by sorting + sort.Strings(interfaceNames) + for _, interfaceName := range interfaceNames { + netInt := result.Interfaces[interfaceName] + for _, netAddress := range netInt.Subnets { + size, _ := netAddress.IPNet.Mask.Size() + if netAddress.IPNet.IP.To4() != nil { + // ipv4 if config.IPAddress == "" { - config.IPAddress = netAddress.Subnet.IP.String() + config.IPAddress = netAddress.IPNet.IP.String() config.IPPrefixLen = size config.Gateway = netAddress.Gateway.String() } else { - config.SecondaryIPAddresses = append(config.SecondaryIPAddresses, netAddress.Subnet.IP.String()) + config.SecondaryIPAddresses = append(config.SecondaryIPAddresses, define.Address{Addr: netAddress.IPNet.IP.String(), PrefixLength: size}) } } else { - //ipv6 + // ipv6 if config.GlobalIPv6Address == "" { - config.GlobalIPv6Address = netAddress.Subnet.IP.String() + config.GlobalIPv6Address = netAddress.IPNet.IP.String() config.GlobalIPv6PrefixLen = size config.IPv6Gateway = netAddress.Gateway.String() } else { - config.SecondaryIPv6Addresses = append(config.SecondaryIPv6Addresses, netAddress.Subnet.IP.String()) + config.SecondaryIPv6Addresses = append(config.SecondaryIPv6Addresses, define.Address{Addr: netAddress.IPNet.IP.String(), PrefixLength: size}) } } } @@ -1138,7 +1173,7 @@ func resultToBasicNetworkConfig(result types.StatusBlock) (define.InspectBasicNe config.AdditionalMacAddresses = append(config.AdditionalMacAddresses, netInt.MacAddress.String()) } } - return config, nil + return config } type logrusDebugWriter struct { @@ -1160,7 +1195,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro c.lock.Lock() defer c.lock.Unlock() - networks, err := c.networksByNameIndex() + networks, err := c.networks() if err != nil { return err } @@ -1200,17 +1235,9 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro ContainerID: c.config.ID, ContainerName: getCNIPodName(c), } - if len(c.config.PortMappings) > 0 { - opts.PortMappings = ocicniPortsToNetTypesPorts(c.config.PortMappings) - } - eth, exists := c.state.NetInterfaceDescriptions.getInterfaceByName(netName) - if !exists { - return errors.Errorf("no network interface name for 
container %s on network %s", c.config.ID, netName) - } + opts.PortMappings = c.convertPortMappings() opts.Networks = map[string]types.PerNetworkOptions{ - netName: { - InterfaceName: eth, - }, + netName: networks[netName], } if err := c.runtime.teardownNetwork(c.state.NetNS.Path(), opts); err != nil { @@ -1218,6 +1245,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro } // update network status if container is running + oldStatus, statusExist := networkStatus[netName] delete(networkStatus, netName) c.state.NetworkStatus = networkStatus err = c.save() @@ -1228,13 +1256,51 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro // Reload ports when there are still connected networks, maybe we removed the network interface with the child ip. // Reloading without connected networks does not make sense, so we can skip this step. if rootless.IsRootless() && len(networkStatus) > 0 { - return c.reloadRootlessRLKPortMapping() + if err := c.reloadRootlessRLKPortMapping(); err != nil { + return err + } + } + + // Update resolv.conf if required + if statusExist { + stringIPs := make([]string, 0, len(oldStatus.DNSServerIPs)) + for _, ip := range oldStatus.DNSServerIPs { + stringIPs = append(stringIPs, ip.String()) + } + if len(stringIPs) > 0 { + logrus.Debugf("Removing DNS Servers %v from resolv.conf", stringIPs) + if err := c.removeNameserver(stringIPs); err != nil { + return err + } + } + + // update /etc/hosts file + if file, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { + // sync the names with c.getHostsEntries() + names := []string{c.Hostname(), c.config.Name} + rm := etchosts.GetNetworkHostEntries(map[string]types.StatusBlock{netName: oldStatus}, names...) + if len(rm) > 0 { + // make sure to lock this file to prevent concurrent writes when + // this is used as a net dependency container + lock, err := lockfile.GetLockfile(file) + if err != nil { + return fmt.Errorf("failed to lock hosts file: %w", err) + } + logrus.Debugf("Remove /etc/hosts entries %v", rm) + lock.Lock() + err = etchosts.Remove(file, rm) + lock.Unlock() + if err != nil { + return err + } + } + } } return nil } // ConnectNetwork connects a container to a given network -func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) error { +func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNetworkOptions) error { // only the bridge mode supports cni networks if err := isBridgeNetMode(c.config.NetMode); err != nil { return err @@ -1243,7 +1309,7 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e c.lock.Lock() defer c.lock.Unlock() - networks, err := c.networksByNameIndex() + networks, err := c.networks() if err != nil { return err } @@ -1262,15 +1328,17 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e // get network status before we connect networkStatus := c.getNetworkStatus() - network, err := c.runtime.network.NetworkInspect(netName) - if err != nil { - return err - } - if !network.DNSEnabled && len(aliases) > 0 { - return errors.Wrapf(define.ErrInvalidArg, "cannot set network aliases for network %q because dns is disabled", netName) + // always add the short id as alias for docker compat + netOpts.Aliases = append(netOpts.Aliases, c.config.ID[:12]) + + if netOpts.InterfaceName == "" { + netOpts.InterfaceName = getFreeInterfaceName(networks) + if netOpts.InterfaceName == "" { + return errors.New("could not find free network interface name") + } } - 
if err := c.runtime.state.NetworkConnect(c, netName, aliases); err != nil { + if err := c.runtime.state.NetworkConnect(c, netName, netOpts); err != nil { return err } c.newNetworkEvent(events.NetworkConnect, netName) @@ -1281,32 +1349,13 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e return errors.Wrapf(define.ErrNoNetwork, "unable to connect %s to %s", nameOrID, netName) } - ctrNetworks, _, err := c.networks() - if err != nil { - return err - } - // Update network descriptions - if err := c.setupNetworkDescriptions(ctrNetworks); err != nil { - return err - } - opts := types.NetworkOptions{ ContainerID: c.config.ID, ContainerName: getCNIPodName(c), } - if len(c.config.PortMappings) > 0 { - opts.PortMappings = ocicniPortsToNetTypesPorts(c.config.PortMappings) - } - eth, exists := c.state.NetInterfaceDescriptions.getInterfaceByName(netName) - if !exists { - return errors.Errorf("no network interface name for container %s on network %s", c.config.ID, netName) - } - aliases = append(aliases, c.config.ID[:12]) + opts.PortMappings = c.convertPortMappings() opts.Networks = map[string]types.PerNetworkOptions{ - netName: { - Aliases: aliases, - InterfaceName: eth, - }, + netName: netOpts, } results, err := c.runtime.setUpNetwork(c.state.NetNS.Path(), opts) @@ -1317,6 +1366,13 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e return errors.New("when adding aliases, results must be of length 1") } + // we need to get the old host entries before we add the new one to the status + // if we do not do it here we will get the wrong existing entries which will throw off the logic + // we could also copy the map but this does not seem worth it + // sync the hostNames with c.getHostsEntries() + hostNames := []string{c.Hostname(), c.config.Name} + oldHostEntries := etchosts.GetNetworkHostEntries(networkStatus, hostNames...) + // update network status if networkStatus == nil { networkStatus = make(map[string]types.StatusBlock, 1) @@ -1328,14 +1384,74 @@ if err != nil { return err } + // The first network needs a port reload to set the correct child ip for the rootlessport process. // Adding a second network does not require a port reload because the child ip is still valid. if rootless.IsRootless() && len(networks) == 0 { - return c.reloadRootlessRLKPortMapping() + if err := c.reloadRootlessRLKPortMapping(); err != nil { + return err + } + } + + ipv6, err := c.checkForIPv6(networkStatus) + if err != nil { + return err + } + + // Update resolv.conf if required + stringIPs := make([]string, 0, len(results[netName].DNSServerIPs)) + for _, ip := range results[netName].DNSServerIPs { + if (ip.To4() == nil) && !ipv6 { + continue + } + stringIPs = append(stringIPs, ip.String()) + } + if len(stringIPs) > 0 { + logrus.Debugf("Adding DNS Servers %v to resolv.conf", stringIPs) + if err := c.addNameserver(stringIPs); err != nil { + return err + } } + + // update /etc/hosts file + if file, ok := c.state.BindMounts[config.DefaultHostsFile]; ok { + // make sure to lock this file to prevent concurrent writes when + // this is used as a net dependency container + lock, err := lockfile.GetLockfile(file) + if err != nil { + return fmt.Errorf("failed to lock hosts file: %w", err) + } + new := etchosts.GetNetworkHostEntries(results, hostNames...)
+ logrus.Debugf("Add /etc/hosts entries %v", new) + // use special AddIfExists API to make sure we only add new entries if an old one exists + // see the AddIfExists() comment for more information + lock.Lock() + err = etchosts.AddIfExists(file, oldHostEntries, new) + lock.Unlock() + if err != nil { + return err + } + } + return nil } +// get a free interface name for a new network +// return an empty string if no free name was found +func getFreeInterfaceName(networks map[string]types.PerNetworkOptions) string { + ifNames := make([]string, 0, len(networks)) + for _, opts := range networks { + ifNames = append(ifNames, opts.InterfaceName) + } + for i := 0; i < 100000; i++ { + ifName := fmt.Sprintf("eth%d", i) + if !util.StringInSlice(ifName, ifNames) { + return ifName + } + } + return "" +} + // DisconnectContainerFromNetwork removes a container from its CNI network func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force bool) error { ctr, err := r.LookupContainer(nameOrID) @@ -1346,12 +1462,12 @@ func (r *Runtime) DisconnectContainerFromNetwork(nameOrID, netName string, force } // ConnectContainerToNetwork connects a container to a CNI network -func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, aliases []string) error { +func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, netOpts types.PerNetworkOptions) error { ctr, err := r.LookupContainer(nameOrID) if err != nil { return err } - return ctr.NetworkConnect(nameOrID, netName, aliases) + return ctr.NetworkConnect(nameOrID, netName, netOpts) } // normalizeNetworkName takes a network name, a partial or a full network ID and returns the network name. @@ -1364,16 +1480,67 @@ func (r *Runtime) normalizeNetworkName(nameOrID string) (string, error) { return net.Name, nil } +// ocicniPortsToNetTypesPorts convert the old port format to the new one +// while deduplicating ports into ranges func ocicniPortsToNetTypesPorts(ports []types.OCICNIPortMapping) []types.PortMapping { + if len(ports) == 0 { + return nil + } + newPorts := make([]types.PortMapping, 0, len(ports)) - for _, port := range ports { - newPorts = append(newPorts, types.PortMapping{ - HostIP: port.HostIP, - HostPort: uint16(port.HostPort), - ContainerPort: uint16(port.ContainerPort), - Protocol: port.Protocol, - Range: 1, - }) + + // first sort the ports + sort.Slice(ports, func(i, j int) bool { + return compareOCICNIPorts(ports[i], ports[j]) + }) + + // we already check if the slice is empty so we can use the first element + currentPort := types.PortMapping{ + HostIP: ports[0].HostIP, + HostPort: uint16(ports[0].HostPort), + ContainerPort: uint16(ports[0].ContainerPort), + Protocol: ports[0].Protocol, + Range: 1, + } + + for i := 1; i < len(ports); i++ { + if ports[i].HostIP == currentPort.HostIP && + ports[i].Protocol == currentPort.Protocol && + ports[i].HostPort-int32(currentPort.Range) == int32(currentPort.HostPort) && + ports[i].ContainerPort-int32(currentPort.Range) == int32(currentPort.ContainerPort) { + currentPort.Range++ + } else { + newPorts = append(newPorts, currentPort) + currentPort = types.PortMapping{ + HostIP: ports[i].HostIP, + HostPort: uint16(ports[i].HostPort), + ContainerPort: uint16(ports[i].ContainerPort), + Protocol: ports[i].Protocol, + Range: 1, + } + } } + newPorts = append(newPorts, currentPort) return newPorts } + +// compareOCICNIPorts will sort the ocicni ports by +// 1) host ip +// 2) protocol +// 3) hostPort +// 4) container port +func compareOCICNIPorts(i, j types.OCICNIPortMapping) bool 
{ + if i.HostIP != j.HostIP { + return i.HostIP < j.HostIP + } + + if i.Protocol != j.Protocol { + return i.Protocol < j.Protocol + } + + if i.HostPort != j.HostPort { + return i.HostPort < j.HostPort + } + + return i.ContainerPort < j.ContainerPort +} diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_machine.go b/vendor/github.com/containers/podman/v4/libpod/networking_machine.go new file mode 100644 index 00000000000..7b8eb94df40 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/networking_machine.go @@ -0,0 +1,133 @@ +package libpod + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/machine" + "github.com/sirupsen/logrus" +) + +const machineGvproxyEndpoint = "gateway.containers.internal" + +// machineExpose is the struct for the gvproxy port forwarding api, sent via json +type machineExpose struct { + // Local is the local address on the vm host, format is ip:port + Local string `json:"local"` + // Remote is used to specify the vm ip:port + Remote string `json:"remote,omitempty"` + // Protocol to forward, tcp or udp + Protocol string `json:"protocol"` +} + +func requestMachinePorts(expose bool, ports []types.PortMapping) error { + url := "http://" + machineGvproxyEndpoint + "/services/forwarder/" + if expose { + url += "expose" + } else { + url += "unexpose" + } + ctx := context.Background() + client := &http.Client{ + Transport: &http.Transport{ + // make sure to not set a proxy here so explicitly ignore the proxy + // since we want to talk directly to gvproxy + // https://github.com/containers/podman/issues/13628 + Proxy: nil, + MaxIdleConns: 50, + IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + } + buf := new(bytes.Buffer) + for num, port := range ports { + protocols := strings.Split(port.Protocol, ",") + for _, protocol := range protocols { + for i := uint16(0); i < port.Range; i++ { + machinePort := machineExpose{ + Local: net.JoinHostPort(port.HostIP, strconv.FormatInt(int64(port.HostPort+i), 10)), + Protocol: protocol, + } + if expose { + // only set the remote port, the ip will automatically be set by gvproxy + machinePort.Remote = ":" + strconv.FormatInt(int64(port.HostPort+i), 10) + } + + // post request + if err := json.NewEncoder(buf).Encode(machinePort); err != nil { + if expose { + // in case of an error make sure to unexpose the other ports + if cerr := requestMachinePorts(false, ports[:num]); cerr != nil { + logrus.Errorf("failed to free gvproxy machine ports: %v", cerr) + } + } + return err + } + if err := makeMachineRequest(ctx, client, url, buf); err != nil { + if expose { + // in case of an error make sure to unexpose the other ports + if cerr := requestMachinePorts(false, ports[:num]); cerr != nil { + logrus.Errorf("failed to free gvproxy machine ports: %v", cerr) + } + } + return err + } + buf.Reset() + } + } + } + return nil +} + +func makeMachineRequest(ctx context.Context, client *http.Client, url string, buf io.Reader) error { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, buf) + if err != nil { + return err + } + req.Header.Add("Accept", "application/json") + req.Header.Add("Content-Type", "application/json") + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return
annotateGvproxyResponseError(resp.Body) + } + return nil +} + +func annotateGvproxyResponseError(r io.Reader) error { + b, err := ioutil.ReadAll(r) + if err == nil && len(b) > 0 { + return fmt.Errorf("something went wrong with the request: %q", string(b)) + } + return errors.New("something went wrong with the request, could not read response") +} + +// exposeMachinePorts exposes the ports for podman machine via gvproxy +func (r *Runtime) exposeMachinePorts(ports []types.PortMapping) error { + if !machine.IsGvProxyBased() { + return nil + } + return requestMachinePorts(true, ports) +} + +// unexposeMachinePorts closes the ports for podman machine via gvproxy +func (r *Runtime) unexposeMachinePorts(ports []types.PortMapping) error { + if !machine.IsGvProxyBased() { + return nil + } + return requestMachinePorts(false, ports) +} diff --git a/vendor/github.com/containers/podman/v3/libpod/networking_slirp4netns.go b/vendor/github.com/containers/podman/v4/libpod/networking_slirp4netns.go similarity index 76% rename from vendor/github.com/containers/podman/v3/libpod/networking_slirp4netns.go rename to vendor/github.com/containers/podman/v4/libpod/networking_slirp4netns.go index 46cda89a9f0..788834435a7 100644 --- a/vendor/github.com/containers/podman/v3/libpod/networking_slirp4netns.go +++ b/vendor/github.com/containers/podman/v4/libpod/networking_slirp4netns.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -13,13 +14,16 @@ import ( "path/filepath" "strconv" "strings" + "sync" "syscall" "time" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/rootlessport" - "github.com/containers/podman/v3/pkg/servicereaper" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/rootlessport" + "github.com/containers/podman/v4/pkg/servicereaper" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -37,9 +41,9 @@ type slirpFeatures struct { type slirp4netnsCmdArg struct { Proto string `json:"proto,omitempty"` HostAddr string `json:"host_addr"` - HostPort int32 `json:"host_port"` + HostPort uint16 `json:"host_port"` GuestAddr string `json:"guest_addr"` - GuestPort int32 `json:"guest_port"` + GuestPort uint16 `json:"guest_port"` } type slirp4netnsCmd struct { @@ -58,6 +62,8 @@ type slirp4netnsNetworkOptions struct { outboundAddr6 string } +const ipv6ConfDefaultAcceptDadSysctl = "/proc/sys/net/ipv6/conf/default/accept_dad" + func checkSlirpFlags(path string) (*slirpFeatures, error) { cmd := exec.Command(path, "--help") out, err := cmd.CombinedOutput() @@ -76,12 +82,15 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) { } func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4netnsNetworkOptions, error) { - slirpOptions := append(r.config.Engine.NetworkCmdOptions, extraOptions...) + slirpOptions := make([]string, 0, len(r.config.Engine.NetworkCmdOptions)+len(extraOptions)) + slirpOptions = append(slirpOptions, r.config.Engine.NetworkCmdOptions...) + slirpOptions = append(slirpOptions, extraOptions...) 
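+ // r.config.Engine.NetworkCmdOptions comes from containers.conf (network_cmd_options);
+ // the per-container extraOptions are appended last so they win when parsed below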
slirp4netnsOpts := &slirp4netnsNetworkOptions{ // overwrite defaults disableHostLoopback: true, mtu: slirp4netnsMTU, noPivotRoot: r.config.Engine.NoPivotRoot, + enableIPv6: true, } for _, o := range slirpOptions { parts := strings.SplitN(o, "=", 2) @@ -204,14 +213,13 @@ func createBasicSlirp4netnsCmdArgs(options *slirp4netnsNetworkOptions, features } // setupSlirp4netns can be called in rootful as well as in rootless -func (r *Runtime) setupSlirp4netns(ctr *Container) error { +func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error { path := r.config.Engine.NetworkCmdPath if path == "" { var err error path, err = exec.LookPath("slirp4netns") if err != nil { - logrus.Errorf("Could not find slirp4netns, the network namespace won't be configured: %v", err) - return nil + return fmt.Errorf("could not find slirp4netns, the network namespace can't be configured: %w", err) } } @@ -260,7 +268,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error { if err != nil { return errors.Wrapf(err, "failed to create rootless network sync pipe") } - netnsPath = ctr.state.NetNS.Path() + netnsPath = netns.Path() cmdArgs = append(cmdArgs, "--netns-type=path", netnsPath, "tap0") } else { defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR) @@ -297,7 +305,58 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error { } cmd.Stdout = logFile cmd.Stderr = logFile + + var slirpReadyWg, netnsReadyWg *sync.WaitGroup + if netOptions.enableIPv6 { + // use two wait groups to make sure we set the sysctl before + // starting slirp and reset it only after slirp is ready + slirpReadyWg = &sync.WaitGroup{} + netnsReadyWg = &sync.WaitGroup{} + slirpReadyWg.Add(1) + netnsReadyWg.Add(1) + + go func() { + err := ns.WithNetNSPath(netnsPath, func(_ ns.NetNS) error { + // Duplicate Address Detection slows the ipv6 setup down for 1-2 seconds. + // Since slirp4netns is run in its own namespace and not directly routed + // we can skip this to make the ipv6 address immediately available. + // We change the default to make sure the slirp tap interface gets the + // correct value assigned so DAD is disabled for it. + // Also make sure to change this value back to the original after slirp4netns + // is ready in case users rely on this sysctl.
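+ // (writing 0 to accept_dad disables Duplicate Address Detection outright; the kernel default is 1)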
+ orgValue, err := ioutil.ReadFile(ipv6ConfDefaultAcceptDadSysctl) + if err != nil { + netnsReadyWg.Done() + // on ipv6 disabled systems the sysctl does not exist + // so we should not error + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + err = ioutil.WriteFile(ipv6ConfDefaultAcceptDadSysctl, []byte("0"), 0644) + netnsReadyWg.Done() + if err != nil { + return err + } + + // wait until slirp4netns is ready before resetting this value + slirpReadyWg.Wait() + return ioutil.WriteFile(ipv6ConfDefaultAcceptDadSysctl, orgValue, 0644) + }) + if err != nil { + logrus.Warnf("failed to set net.ipv6.conf.default.accept_dad sysctl: %v", err) + } + }() + + // wait until we set the sysctl + netnsReadyWg.Wait() + } + if err := cmd.Start(); err != nil { + if netOptions.enableIPv6 { + slirpReadyWg.Done() + } return errors.Wrapf(err, "failed to start slirp4netns process") } defer func() { @@ -307,7 +366,11 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error { } }() - if err := waitForSync(syncR, cmd, logFile, 1*time.Second); err != nil { + err = waitForSync(syncR, cmd, logFile, 1*time.Second) + if netOptions.enableIPv6 { + slirpReadyWg.Done() + } + if err != nil { return err } @@ -327,7 +390,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error { if netOptions.isSlirpHostForward { return r.setupRootlessPortMappingViaSlirp(ctr, cmd, apiSocket) } - return r.setupRootlessPortMappingViaRLK(ctr, netnsPath) + return r.setupRootlessPortMappingViaRLK(ctr, netnsPath, nil) } return nil @@ -440,7 +503,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t return nil } -func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath string) error { +func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath string, netStatus map[string]types.StatusBlock) error { syncR, syncW, err := os.Pipe() if err != nil { return errors.Wrapf(err, "failed to open pipe") @@ -467,9 +530,9 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin } } - childIP := getRootlessPortChildIP(ctr) + childIP := getRootlessPortChildIP(ctr, netStatus) cfg := rootlessport.Config{ - Mappings: ctr.config.PortMappings, + Mappings: ctr.convertPortMappings(), NetNSPath: netnsPath, ExitFD: 3, ReadyFD: 4, @@ -484,13 +547,15 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin } cfgR := bytes.NewReader(cfgJSON) var stdout bytes.Buffer - cmd := exec.Command(fmt.Sprintf("/proc/%d/exe", os.Getpid())) - cmd.Args = []string{rootlessport.ReexecKey} - // Leak one end of the pipe in rootlessport process, the other will be sent to conmon - - if ctr.rootlessPortSyncR != nil { - defer errorhandling.CloseQuiet(ctr.rootlessPortSyncR) + path, err := r.config.FindHelperBinary(rootlessport.BinaryName, false) + if err != nil { + return err } + cmd := exec.Command(path) + cmd.Args = []string{rootlessport.BinaryName} + + // Leak one end of the pipe in rootlessport process, the other will be sent to conmon + defer errorhandling.CloseQuiet(ctr.rootlessPortSyncR) cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessPortSyncR, syncW) cmd.Stdin = cfgR @@ -552,61 +617,74 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd // for each port we want to add we need to open a connection to the slirp4netns control socket // and send the add_hostfwd command.
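The rewritten loop below fans each mapping out across its comma-separated protocol list and its port range before handing the individual forwards to openSlirp4netnsPort. As a worked illustration of those semantics, here is a self-contained sketch (portMapping is a local stand-in for the vendored types.PortMapping, and the sample values are invented):

    package main

    import (
        "fmt"
        "strings"
    )

    // portMapping mirrors just the fields of types.PortMapping used here.
    type portMapping struct {
        HostIP        string
        HostPort      uint16
        ContainerPort uint16
        Range         uint16 // number of consecutive ports, starting at HostPort/ContainerPort
        Protocol      string // possibly comma-separated, e.g. "tcp,udp"
    }

    func main() {
        port := portMapping{HostPort: 8080, ContainerPort: 80, Range: 3, Protocol: "tcp,udp"}
        for _, proto := range strings.Split(port.Protocol, ",") {
            hostIP := port.HostIP
            if hostIP == "" {
                hostIP = "0.0.0.0" // slirp4netns needs an explicit host address
            }
            for i := uint16(0); i < port.Range; i++ {
                // one add_hostfwd request per protocol/port pair, six in total here
                fmt.Printf("add_hostfwd %s %s:%d -> guest:%d\n",
                    proto, hostIP, port.HostPort+i, port.ContainerPort+i)
            }
        }
    }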
- for _, i := range ctr.config.PortMappings { - conn, err := net.Dial("unix", apiSocket) - if err != nil { - return errors.Wrapf(err, "cannot open connection to %s", apiSocket) - } - defer func() { - if err := conn.Close(); err != nil { - logrus.Errorf("Unable to close connection: %q", err) + for _, port := range ctr.convertPortMappings() { + protocols := strings.Split(port.Protocol, ",") + for _, protocol := range protocols { + hostIP := port.HostIP + if hostIP == "" { + hostIP = "0.0.0.0" + } + for i := uint16(0); i < port.Range; i++ { + if err := openSlirp4netnsPort(apiSocket, protocol, hostIP, port.HostPort+i, port.ContainerPort+i); err != nil { + return err + } } - }() - hostIP := i.HostIP - if hostIP == "" { - hostIP = "0.0.0.0" - } - apiCmd := slirp4netnsCmd{ - Execute: "add_hostfwd", - Args: slirp4netnsCmdArg{ - Proto: i.Protocol, - HostAddr: hostIP, - HostPort: i.HostPort, - GuestPort: i.ContainerPort, - }, - } - // create the JSON payload and send it. Mark the end of request shutting down writes - // to the socket, as requested by slirp4netns. - data, err := json.Marshal(&apiCmd) - if err != nil { - return errors.Wrapf(err, "cannot marshal JSON for slirp4netns") - } - if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil { - return errors.Wrapf(err, "cannot write to control socket %s", apiSocket) - } - if err := conn.(*net.UnixConn).CloseWrite(); err != nil { - return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket) - } - buf := make([]byte, 2048) - readLength, err := conn.Read(buf) - if err != nil { - return errors.Wrapf(err, "cannot read from control socket %s", apiSocket) - } - // if there is no 'error' key in the received JSON data, then the operation was - // successful. - var y map[string]interface{} - if err := json.Unmarshal(buf[0:readLength], &y); err != nil { - return errors.Wrapf(err, "error parsing error status from slirp4netns") - } - if e, found := y["error"]; found { - return errors.Errorf("error from slirp4netns while setting up port redirection: %v", e) } } logrus.Debug("slirp4netns port-forwarding setup via add_hostfwd is ready") return nil } -func getRootlessPortChildIP(c *Container) string { +// openSlirp4netnsPort sends the slirp4netns api query to the given socket +func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport uint16) error { + conn, err := net.Dial("unix", apiSocket) + if err != nil { + return errors.Wrapf(err, "cannot open connection to %s", apiSocket) + } + defer func() { + if err := conn.Close(); err != nil { + logrus.Errorf("Unable to close slirp4netns connection: %q", err) + } + }() + apiCmd := slirp4netnsCmd{ + Execute: "add_hostfwd", + Args: slirp4netnsCmdArg{ + Proto: proto, + HostAddr: hostip, + HostPort: hostport, + GuestPort: guestport, + }, + } + // create the JSON payload and send it. Mark the end of request shutting down writes + // to the socket, as requested by slirp4netns.
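+ // (slirp4netns answers with a single JSON object; a reply without an "error" key means the forward was added)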
+ data, err := json.Marshal(&apiCmd) + if err != nil { + return errors.Wrapf(err, "cannot marshal JSON for slirp4netns") + } + if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil { + return errors.Wrapf(err, "cannot write to control socket %s", apiSocket) + } + if err := conn.(*net.UnixConn).CloseWrite(); err != nil { + return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket) + } + buf := make([]byte, 2048) + readLength, err := conn.Read(buf) + if err != nil { + return errors.Wrapf(err, "cannot read from control socket %s", apiSocket) + } + // if there is no 'error' key in the received JSON data, then the operation was + // successful. + var y map[string]interface{} + if err := json.Unmarshal(buf[0:readLength], &y); err != nil { + return errors.Wrapf(err, "error parsing error status from slirp4netns") + } + if e, found := y["error"]; found { + return errors.Errorf("from slirp4netns while setting up port redirection: %v", e) + } + return nil +} + +func getRootlessPortChildIP(c *Container, netStatus map[string]types.StatusBlock) string { if c.config.NetMode.IsSlirp4netns() { slirp4netnsIP, err := GetSlirp4netnsIP(c.slirp4netnsSubnet) if err != nil { @@ -616,14 +694,14 @@ func getRootlessPortChildIP(c *Container) string { } var ipv6 net.IP - for _, status := range c.getNetworkStatus() { + for _, status := range netStatus { for _, netInt := range status.Interfaces { - for _, netAddress := range netInt.Networks { - ipv4 := netAddress.Subnet.IP.To4() + for _, netAddress := range netInt.Subnets { + ipv4 := netAddress.IPNet.IP.To4() if ipv4 != nil { return ipv4.String() } - ipv6 = netAddress.Subnet.IP + ipv6 = netAddress.IPNet.IP } } } @@ -636,15 +714,15 @@ func getRootlessPortChildIP(c *Container) string { // reloadRootlessRLKPortMapping will trigger a reload for the port mappings in the rootlessport process. // This should only be called by network connect/disconnect and only as rootless. func (c *Container) reloadRootlessRLKPortMapping() error { - childIP := getRootlessPortChildIP(c) + if len(c.config.PortMappings) == 0 { + return nil + } + childIP := getRootlessPortChildIP(c, c.state.NetworkStatus) logrus.Debugf("reloading rootless ports for container %s, childIP is %s", c.config.ID, childIP) conn, err := openUnixSocket(filepath.Join(c.runtime.config.Engine.TmpDir, "rp", c.config.ID)) if err != nil { - // This is not a hard error for backwards compatibility. A container started - // with an old version did not created the rootlessport socket. - logrus.Warnf("Could not reload rootless port mappings, port forwarding may no longer work correctly: %v", err) - return nil + return errors.Wrap(err, "could not reload rootless port mappings, port forwarding may no longer work correctly") } defer conn.Close() enc := json.NewEncoder(conn) diff --git a/vendor/github.com/containers/podman/v3/libpod/oci.go b/vendor/github.com/containers/podman/v4/libpod/oci.go similarity index 95% rename from vendor/github.com/containers/podman/v3/libpod/oci.go rename to vendor/github.com/containers/podman/v4/libpod/oci.go index c92d9a0778e..09f856ac7af 100644 --- a/vendor/github.com/containers/podman/v3/libpod/oci.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci.go @@ -3,7 +3,7 @@ package libpod import ( "net/http" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" ) // OCIRuntime is an implementation of an OCI runtime. 
@@ -23,7 +23,10 @@ type OCIRuntime interface { Path() string // CreateContainer creates the container in the OCI runtime. - CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error + // The returned int64 contains the microseconds needed to restore + // the given container if it is a restore and if restoreOptions.PrintStats + // is true. In all other cases the returned int64 is 0. + CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) // UpdateContainerStatus updates the status of the given container. UpdateContainerStatus(ctr *Container) error // StartContainer starts the given container. @@ -101,8 +104,10 @@ type OCIRuntime interface { // CheckpointContainer checkpoints the given container. // Some OCI runtimes may not support this - if SupportsCheckpoint() // returns false, this is not implemented, and will always return an - // error. - CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error + // error. If CheckpointOptions.PrintStats is true the first return parameter + // contains the number of microseconds the runtime needed to checkpoint + // the given container. + CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) (int64, error) // CheckConmonRunning verifies that the given container's Conmon // instance is still running. Runtimes without Conmon, or systems where diff --git a/vendor/github.com/containers/podman/v3/libpod/oci_attach_linux.go b/vendor/github.com/containers/podman/v4/libpod/oci_attach_linux.go similarity index 89% rename from vendor/github.com/containers/podman/v3/libpod/oci_attach_linux.go rename to vendor/github.com/containers/podman/v4/libpod/oci_attach_linux.go index d4d4a10767d..06f8f8719e5 100644 --- a/vendor/github.com/containers/podman/v3/libpod/oci_attach_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_attach_linux.go @@ -1,4 +1,5 @@ -//+build linux +//go:build linux +// +build linux package libpod @@ -8,12 +9,13 @@ import ( "net" "os" "path/filepath" + "syscall" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/kubeutils" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/kubeutils" + "github.com/containers/podman/v4/utils" "github.com/moby/term" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -93,7 +95,7 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <- if attachRdy != nil { attachRdy <- true } - return readStdio(streams, receiveStdoutError, stdinDone) + return readStdio(conn, streams, receiveStdoutError, stdinDone) } // Attach to the given container's exec session @@ -143,7 +145,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se } // 2: read from attachFd that the parent process has set up the console socket - if _, err := readConmonPipeData(attachFd, ""); err != nil { + if _, err := readConmonPipeData(c.ociRuntime.Name(), attachFd, ""); err != nil { return err } @@ -174,7 +176,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se return err } - return readStdio(streams, receiveStdoutError, stdinDone) + return readStdio(conn, streams, receiveStdoutError, stdinDone) } func processDetachKeys(keys string) ([]byte, error) { @@ -217,11 +219,6 @@ func 
setupStdioChannels(streams *define.AttachStreams, conn *net.UnixConn, detac var err error if streams.AttachInput { _, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys) - if err == nil { - if connErr := conn.CloseWrite(); connErr != nil { - logrus.Errorf("Unable to close conn: %q", connErr) - } - } } stdinDone <- err }() @@ -263,7 +260,7 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO } } } - if er == io.EOF { + if errors.Is(er, io.EOF) || errors.Is(er, syscall.ECONNRESET) { break } if er != nil { @@ -274,15 +271,27 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO return err } -func readStdio(streams *define.AttachStreams, receiveStdoutError, stdinDone chan error) error { +func readStdio(conn *net.UnixConn, streams *define.AttachStreams, receiveStdoutError, stdinDone chan error) error { var err error select { case err = <-receiveStdoutError: + if err := conn.CloseWrite(); err != nil { + logrus.Errorf("Failed to close stdin: %v", err) + } return err case err = <-stdinDone: if err == define.ErrDetach { + if err := conn.CloseWrite(); err != nil { + logrus.Errorf("Failed to close stdin: %v", err) + } return err } + if err == nil { + // copy stdin is done, close it + if connErr := conn.CloseWrite(); connErr != nil { + logrus.Errorf("Unable to close conn: %v", connErr) + } + } if streams.AttachOutput || streams.AttachError { return <-receiveStdoutError } diff --git a/vendor/github.com/containers/podman/v3/libpod/oci_conmon.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon.go similarity index 100% rename from vendor/github.com/containers/podman/v3/libpod/oci_conmon.go rename to vendor/github.com/containers/podman/v4/libpod/oci_conmon.go diff --git a/vendor/github.com/containers/podman/v3/libpod/oci_conmon_exec_linux.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v3/libpod/oci_conmon_exec_linux.go rename to vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_linux.go index 822377bfe87..6d2f135252e 100644 --- a/vendor/github.com/containers/podman/v3/libpod/oci_conmon_exec_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_linux.go @@ -13,11 +13,11 @@ import ( "github.com/containers/common/pkg/capabilities" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/lookup" - "github.com/containers/podman/v3/pkg/util" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/lookup" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v4/utils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -76,7 +76,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options return -1, nil, errors.Wrapf(err, "cannot run conmon") } - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog) return pid, attachChan, err } @@ -134,7 +134,7 @@ func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, o conmonPipeDataChan := make(chan conmonPipeData) go func() { // attachToExec is responsible for closing pipes - attachChan <- attachExecHTTP(ctr, 
sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen, execCmd, conmonPipeDataChan, ociLog, newSize) + attachChan <- attachExecHTTP(ctr, sessionID, req, w, streams, pipes, detachKeys, options.Terminal, cancel, hijackDone, holdConnOpen, execCmd, conmonPipeDataChan, ociLog, newSize, r.name) close(attachChan) }() @@ -176,7 +176,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin // Wait for Conmon to tell us we're ready to attach. // We aren't actually *going* to attach, but this means that we're good // to proceed. - if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + if _, err := readConmonPipeData(r.name, pipes.attachPipe, ""); err != nil { return -1, err } @@ -190,7 +190,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin return -1, errors.Wrapf(err, "cannot run conmon") } - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog) return pid, err } @@ -257,7 +257,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t } // Wait for the PID to stop - if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil { + if err := waitPidStop(pid, killContainerTimeout); err != nil { return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid) } @@ -389,6 +389,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex if err != nil { return nil, nil, err } + defer processFile.Close() args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, define.NoLogging, "") @@ -438,7 +439,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex // } // } - conmonEnv := r.configureConmonEnv(c, runtimeDir) + conmonEnv := r.configureConmonEnv(runtimeDir) var filesToClose []*os.File if options.PreserveFDs > 0 { @@ -461,7 +462,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex Setpgid: true, } - err = startCommandGivenSelinux(execCmd, c) + err = startCommand(execCmd, c) // We don't need children pipes on the parent side errorhandling.CloseQuiet(childSyncPipe) @@ -486,7 +487,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex } // Attach to a container over HTTP -func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string, newSize *define.TerminalSize) (deferredErr error) { +func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, execCmd *exec.Cmd, conmonPipeDataChan chan<- conmonPipeData, ociLog string, newSize *define.TerminalSize, runtimeName string) (deferredErr error) { // NOTE: As you may notice, the attach code is quite complex. // Many things happen concurrently and yet are interdependent. 
// If you ever change this function, make sure to write to the @@ -519,7 +520,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp } // 2: read from attachFd that the parent process has set up the console socket - if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil { + if _, err := readConmonPipeData(runtimeName, pipes.attachPipe, ""); err != nil { conmonPipeDataChan <- conmonPipeData{-1, err} return err } @@ -582,7 +583,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp if err := execCmd.Wait(); err != nil { conmonPipeDataChan <- conmonPipeData{-1, err} } else { - pid, err := readConmonPipeData(pipes.syncPipe, ociLog) + pid, err := readConmonPipeData(runtimeName, pipes.syncPipe, ociLog) if err != nil { hijackWriteError(err, c.ID(), isTerminal, httpBuf) conmonPipeDataChan <- conmonPipeData{pid, err} @@ -609,9 +610,6 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp _, err := utils.CopyDetachable(conn, httpBuf, detachKeys) logrus.Debugf("STDIN copy completed") stdinChan <- err - if connErr := conn.CloseWrite(); connErr != nil { - logrus.Errorf("Unable to close conn: %v", connErr) - } }() } @@ -654,6 +652,10 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp if err != nil { return err } + // copy stdin is done, close it + if connErr := conn.CloseWrite(); connErr != nil { + logrus.Errorf("Unable to close conn: %v", connErr) + } case <-cancel: return nil } @@ -756,18 +758,19 @@ func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessio } else { pspec.Capabilities.Bounding = ctrSpec.Process.Capabilities.Bounding } + + // Always unset the inheritable capabilities similarly to what the Linux kernel does + // They are used only when using capabilities with uid != 0. 
+ pspec.Capabilities.Inheritable = []string{} + if execUser.Uid == 0 { pspec.Capabilities.Effective = pspec.Capabilities.Bounding - pspec.Capabilities.Inheritable = pspec.Capabilities.Bounding pspec.Capabilities.Permitted = pspec.Capabilities.Bounding - pspec.Capabilities.Ambient = pspec.Capabilities.Bounding - } else { - if user == c.config.User { - pspec.Capabilities.Effective = ctrSpec.Process.Capabilities.Effective - pspec.Capabilities.Inheritable = ctrSpec.Process.Capabilities.Effective - pspec.Capabilities.Permitted = ctrSpec.Process.Capabilities.Effective - pspec.Capabilities.Ambient = ctrSpec.Process.Capabilities.Effective - } + } else if user == c.config.User { + pspec.Capabilities.Effective = ctrSpec.Process.Capabilities.Effective + pspec.Capabilities.Inheritable = ctrSpec.Process.Capabilities.Effective + pspec.Capabilities.Permitted = ctrSpec.Process.Capabilities.Effective + pspec.Capabilities.Ambient = ctrSpec.Process.Capabilities.Effective } hasHomeSet := false diff --git a/vendor/github.com/containers/podman/v3/libpod/oci_conmon_linux.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go similarity index 89% rename from vendor/github.com/containers/podman/v3/libpod/oci_conmon_linux.go rename to vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go index 71a7b29fa01..c232702e9b6 100644 --- a/vendor/github.com/containers/podman/v3/libpod/oci_conmon_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -22,21 +23,21 @@ import ( "text/template" "time" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" conmonConfig "github.com/containers/conmon/runner/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/logs" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/checkpoint/crutils" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/logs" + "github.com/containers/podman/v4/pkg/checkpoint/crutils" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/specgenutil" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v4/utils" "github.com/containers/storage/pkg/homedir" pmount "github.com/containers/storage/pkg/mount" "github.com/coreos/go-systemd/v22/daemon" spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -183,35 +184,39 @@ func hasCurrentUserMapped(ctr *Container) bool { } // CreateContainer creates a container. -func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { +func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { // always make the run dir accessible to the current user so that the PID files can be read without // being in the rootless user namespace. 
if err := makeAccessible(ctr.state.RunDir, 0, 0); err != nil { - return err + return 0, err } if !hasCurrentUserMapped(ctr) { for _, i := range []string{ctr.state.RunDir, ctr.runtime.config.Engine.TmpDir, ctr.config.StaticDir, ctr.state.Mountpoint, ctr.runtime.config.Engine.VolumePath} { if err := makeAccessible(i, ctr.RootUID(), ctr.RootGID()); err != nil { - return err + return 0, err } } // if we are running a non privileged container, be sure to umount some kernel paths so they are not // bind mounted inside the container at all. if !ctr.config.Privileged && !rootless.IsRootless() { - ch := make(chan error) + type result struct { + restoreDuration int64 + err error + } + ch := make(chan result) go func() { runtime.LockOSThread() - err := func() error { + restoreDuration, err := func() (int64, error) { fd, err := os.Open(fmt.Sprintf("/proc/%d/task/%d/ns/mnt", os.Getpid(), unix.Gettid())) if err != nil { - return err + return 0, err } defer errorhandling.CloseQuiet(fd) // create a new mountns on the current thread if err = unix.Unshare(unix.CLONE_NEWNS); err != nil { - return err + return 0, err } defer func() { if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil { @@ -224,12 +229,12 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta // changes are propagated to the host. err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "") if err != nil { - return errors.Wrapf(err, "cannot make /sys slave") + return 0, errors.Wrapf(err, "cannot make /sys slave") } mounts, err := pmount.GetMounts() if err != nil { - return err + return 0, err } for _, m := range mounts { if !strings.HasPrefix(m.Mountpoint, "/sys/kernel") { @@ -237,15 +242,18 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta } err = unix.Unmount(m.Mountpoint, 0) if err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "cannot unmount %s", m.Mountpoint) + return 0, errors.Wrapf(err, "cannot unmount %s", m.Mountpoint) } } return r.createOCIContainer(ctr, restoreOptions) }() - ch <- err + ch <- result{ + restoreDuration: restoreDuration, + err: err, + } }() - err := <-ch - return err + r := <-ch + return r.restoreDuration, r.err } } return r.createOCIContainer(ctr, restoreOptions) @@ -289,7 +297,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { if err2 != nil { return errors.Wrapf(err, "error getting container %s state", ctr.ID()) } - if strings.Contains(string(out), "does not exist") { + if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") { if err := ctr.removeConmonFiles(); err != nil { logrus.Debugf("unable to remove conmon files for container %s", ctr.ID()) } @@ -351,6 +359,12 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { return ctr.handleExitFile(exitFile, fi) } + // Handle ContainerStateStopping - keep it unless the container + // transitioned to no longer running. 
+ if oldState == define.ContainerStateStopping && (ctr.state.State == define.ContainerStatePaused || ctr.state.State == define.ContainerStateRunning) { + ctr.state.State = define.ContainerStateStopping + } + return nil } @@ -393,6 +407,14 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) args = append(args, "kill", ctr.ID(), fmt.Sprintf("%d", signal)) } if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil { + // Update container state - there's a chance we failed because + // the container exited in the meantime. + if err2 := r.UpdateContainerStatus(ctr); err2 != nil { + logrus.Infof("Error updating status for container %s: %v", ctr.ID(), err2) + } + if ctr.state.State == define.ContainerStateExited { + return nil + } return errors.Wrapf(err, "error sending signal to container %s", ctr.ID()) } @@ -435,7 +457,8 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) } if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil { - logrus.Infof("Timed out stopping container %s, resorting to SIGKILL: %v", ctr.ID(), err) + logrus.Debugf("Timed out stopping container %s with %s, resorting to SIGKILL: %v", ctr.ID(), unix.SignalName(syscall.Signal(stopSignal)), err) + logrus.Warnf("StopSignal %s failed to stop container %s in %d seconds, resorting to SIGKILL", unix.SignalName(syscall.Signal(stopSignal)), ctr.Name(), timeout) } else { // No error, the container is dead return nil @@ -637,13 +660,13 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http. } errChan <- err }() + if err := ctr.ReadLog(context.Background(), logOpts, logChan, 0); err != nil { + return err + } go func() { logOpts.WaitGroup.Wait() close(logChan) }() - if err := ctr.ReadLog(context.Background(), logOpts, logChan); err != nil { - return err - } logrus.Debugf("Done reading logs for container %s, %d bytes", ctr.ID(), logSize) if err := <-errChan; err != nil { return err @@ -701,6 +724,10 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http. if err != nil { return err } + // copy stdin is done, close it + if connErr := conn.CloseWrite(); connErr != nil { + logrus.Errorf("Unable to close conn: %v", connErr) + } case <-cancel: return nil } @@ -722,7 +749,7 @@ func openControlFile(ctr *Container, parentDir string) (*os.File, error) { for i := 0; i < 600; i++ { controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY|unix.O_NONBLOCK, 0) if err == nil { - return controlFile, err + return controlFile, nil } if !isRetryable(err) { return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID()) @@ -749,10 +776,7 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalS } // CheckpointContainer checkpoints the given container. 
-func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { - if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { - return err - } +func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) (int64, error) { // imagePath is used by CRIU to store the actual checkpoint files imagePath := ctr.CheckpointPath() if options.PreCheckPoint { @@ -776,6 +800,9 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container if options.TCPEstablished { args = append(args, "--tcp-established") } + if options.FileLocks { + args = append(args, "--file-locks") + } if !options.PreCheckPoint && options.KeepRunning { args = append(args, "--leave-running") } @@ -789,16 +816,45 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container filepath.Join("..", preCheckpointDir), ) } + + args = append(args, ctr.ID()) + logrus.Debugf("the args to checkpoint: %s %s", r.path, strings.Join(args, " ")) + runtimeDir, err := util.GetRuntimeDir() if err != nil { - return err + return 0, err } - if err = os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil { - return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") + env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)} + if path, ok := os.LookupEnv("PATH"); ok { + env = append(env, fmt.Sprintf("PATH=%s", path)) } - args = append(args, ctr.ID()) - logrus.Debugf("the args to checkpoint: %s %s", r.path, strings.Join(args, " ")) - return utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, nil, r.path, args...) + + runtime.LockOSThread() + if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil { + return 0, err + } + + runtimeCheckpointStarted := time.Now() + err = utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...) + // Ignore error returned from SetSocketLabel("") call, + // can't recover. + if labelErr := label.SetSocketLabel(""); labelErr == nil { + // Unlock the thread only if the process label could be restored + // successfully. Otherwise leave the thread locked and the Go runtime + // will terminate it once it returns to the threads pool. 
+ runtime.UnlockOSThread() + } else { + logrus.Errorf("Unable to reset socket label: %q", labelErr) + } + + runtimeCheckpointDuration := func() int64 { + if options.PrintStats { + return time.Since(runtimeCheckpointStarted).Microseconds() + } + return 0 + }() + + return runtimeCheckpointDuration, err } func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) { @@ -958,7 +1014,8 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) { } data, err := ctr.inspectLocked(false) if err != nil { - return "", nil + // FIXME: this error should probably be returned + return "", nil // nolint: nilerr } tmpl, err := template.New("container").Parse(logTag) if err != nil { @@ -973,23 +1030,23 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) { } // createOCIContainer generates this container's main conmon instance and prepares it for starting -func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { +func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { var stderrBuf bytes.Buffer runtimeDir, err := util.GetRuntimeDir() if err != nil { - return err + return 0, err } parentSyncPipe, childSyncPipe, err := newPipe() if err != nil { - return errors.Wrapf(err, "error creating socket pair") + return 0, errors.Wrapf(err, "error creating socket pair") } defer errorhandling.CloseQuiet(parentSyncPipe) childStartPipe, parentStartPipe, err := newPipe() if err != nil { - return errors.Wrapf(err, "error creating socket pair for start pipe") + return 0, errors.Wrapf(err, "error creating socket pair for start pipe") } defer errorhandling.CloseQuiet(parentStartPipe) @@ -1001,12 +1058,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co logTag, err := r.getLogTag(ctr) if err != nil { - return err + return 0, err } if ctr.config.CgroupsMode == cgroupSplit { - if err := utils.MoveUnderCgroupSubtree("supervisor"); err != nil { - return err + if err := utils.MoveUnderCgroupSubtree("runtime"); err != nil { + return 0, err } } @@ -1042,11 +1099,15 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co args = append(args, "--no-pivot") } - if len(ctr.config.ExitCommand) > 0 { - args = append(args, "--exit-command", ctr.config.ExitCommand[0]) - for _, arg := range ctr.config.ExitCommand[1:] { - args = append(args, []string{"--exit-command-arg", arg}...) - } + exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), false) + if err != nil { + return 0, err + } + exitCommand = append(exitCommand, ctr.config.ID) + + args = append(args, "--exit-command", exitCommand[0]) + for _, arg := range exitCommand[1:] { + args = append(args, []string{"--exit-command-arg", arg}...) } // Pass down the LISTEN_* environment (see #10443). 
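The hunks above thread a new int64 return value through CreateContainer, CheckpointContainer, and createOCIContainer so callers can report how long the runtime spent checkpointing or restoring; the duration is only populated when PrintStats is set. A standalone sketch of that pattern, with illustrative names rather than podman's API:

package main

import (
	"fmt"
	"time"
)

// timeOp runs op and reports its duration in microseconds, but only
// when stats were requested -- mirroring the PrintStats pattern above.
func timeOp(printStats bool, op func() error) (int64, error) {
	started := time.Now()
	err := op()
	if !printStats {
		return 0, err
	}
	return time.Since(started).Microseconds(), err
}

func main() {
	dur, err := timeOp(true, func() error {
		time.Sleep(5 * time.Millisecond) // stand-in for the OCI runtime call
		return nil
	})
	fmt.Printf("duration=%dus err=%v\n", dur, err)
}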
@@ -1057,7 +1118,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co } else { fds, err := strconv.Atoi(val) if err != nil { - return fmt.Errorf("converting LISTEN_FDS=%s: %w", val, err) + return 0, fmt.Errorf("converting LISTEN_FDS=%s: %w", val, err) } preserveFDs = uint(fds) } @@ -1072,6 +1133,9 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co if restoreOptions.TCPEstablished { args = append(args, "--runtime-opt", "--tcp-established") } + if restoreOptions.FileLocks { + args = append(args, "--runtime-opt", "--file-locks") + } if restoreOptions.Pod != "" { mountLabel := ctr.config.MountLabel processLabel := ctr.config.ProcessLabel @@ -1117,7 +1181,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co } // 0, 1 and 2 are stdin, stdout and stderr - conmonEnv := r.configureConmonEnv(ctr, runtimeDir) + conmonEnv := r.configureConmonEnv(runtimeDir) var filesToClose []*os.File if preserveFDs > 0 { @@ -1136,9 +1200,9 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co cmd.ExtraFiles = append(cmd.ExtraFiles, childSyncPipe, childStartPipe) if r.reservePorts && !rootless.IsRootless() && !ctr.config.NetMode.IsSlirp4netns() { - ports, err := bindPorts(ctr.config.PortMappings) + ports, err := bindPorts(ctr.convertPortMappings()) if err != nil { - return err + return 0, err } filesToClose = append(filesToClose, ports...) @@ -1154,12 +1218,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co if havePortMapping { ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe() if err != nil { - return errors.Wrapf(err, "failed to create rootless port sync pipe") + return 0, errors.Wrapf(err, "failed to create rootless port sync pipe") } } ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe() if err != nil { - return errors.Wrapf(err, "failed to create rootless network sync pipe") + return 0, errors.Wrapf(err, "failed to create rootless network sync pipe") } } else { if ctr.rootlessSlirpSyncR != nil { @@ -1178,30 +1242,33 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessPortSyncW) } } - - err = startCommandGivenSelinux(cmd, ctr) + var runtimeRestoreStarted time.Time + if restoreOptions != nil { + runtimeRestoreStarted = time.Now() + } + err = startCommand(cmd, ctr) // regardless of whether we errored or not, we no longer need the children pipes childSyncPipe.Close() childStartPipe.Close() if err != nil { - return err + return 0, err } if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil { - return err + return 0, err } /* Wait for initial setup and fork, and reap child */ err = cmd.Wait() if err != nil { - return err + return 0, err } - pid, err := readConmonPipeData(parentSyncPipe, ociLog) + pid, err := readConmonPipeData(r.name, parentSyncPipe, ociLog) if err != nil { if err2 := r.DeleteContainer(ctr); err2 != nil { logrus.Errorf("Removing container %s from runtime after creation failed", ctr.ID()) } - return err + return 0, err } ctr.state.PID = pid @@ -1227,24 +1294,35 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co } } + runtimeRestoreDuration := func() int64 { + if restoreOptions != nil && restoreOptions.PrintStats { + return time.Since(runtimeRestoreStarted).Microseconds() + } + return 0 + }() + // These fds were passed down to the runtime. 
Close them // and not interfere for _, f := range filesToClose { errorhandling.CloseQuiet(f) } - return nil + return runtimeRestoreDuration, nil } // configureConmonEnv gets the environment values to add to conmon's exec struct // TODO this may want to be less hardcoded/more configurable in the future -func (r *ConmonOCIRuntime) configureConmonEnv(ctr *Container, runtimeDir string) []string { +func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) []string { var env []string for _, e := range os.Environ() { if strings.HasPrefix(e, "LC_") { env = append(env, e) } } + conf, ok := os.LookupEnv("CONTAINERS_CONF") + if ok { + env = append(env, fmt.Sprintf("CONTAINERS_CONF=%s", conf)) + } env = append(env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)) env = append(env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED"))) env = append(env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID"))) @@ -1293,7 +1371,7 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p case define.JSONLogging: fallthrough //lint:ignore ST1015 the default case has to be here - default: //nolint-stylecheck + default: //nolint:stylecheck,gocritic // No case here should happen except JSONLogging, but keep this here in case the options are extended logrus.Errorf("%s logging specified but not supported. Choosing k8s-file logging instead", ctr.LogDriver()) fallthrough @@ -1329,15 +1407,13 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p args = append(args, "--log-tag", logTag) } if ctr.config.NoCgroups { - logrus.Debugf("Running with no CGroups") + logrus.Debugf("Running with no Cgroups") args = append(args, "--runtime-arg", "--cgroup-manager", "--runtime-arg", "disabled") } return args } -// startCommandGivenSelinux starts a container ensuring to set the labels of -// the process to make sure SELinux doesn't block conmon communication, if SELinux is enabled -func startCommandGivenSelinux(cmd *exec.Cmd, ctr *Container) error { +func startCommand(cmd *exec.Cmd, ctr *Container) error { // Make sure to unset the NOTIFY_SOCKET and reset if afterwards if needed. switch ctr.config.SdNotifyMode { case define.SdNotifyModeContainer, define.SdNotifyModeIgnore: @@ -1354,43 +1430,7 @@ func startCommandGivenSelinux(cmd *exec.Cmd, ctr *Container) error { } } - if !selinux.GetEnabled() { - return cmd.Start() - } - // Set the label of the conmon process to be level :s0 - // This will allow the container processes to talk to fifo-files - // passed into the container by conmon - var ( - plabel string - con selinux.Context - err error - ) - plabel, err = selinux.CurrentLabel() - if err != nil { - return errors.Wrapf(err, "failed to get current SELinux label") - } - - con, err = selinux.NewContext(plabel) - if err != nil { - return errors.Wrapf(err, "failed to get new context from SELinux label") - } - - runtime.LockOSThread() - if con["level"] != "s0" && con["level"] != "" { - con["level"] = "s0" - if err = label.SetProcessLabel(con.Get()); err != nil { - runtime.UnlockOSThread() - return err - } - } - err = cmd.Start() - // Ignore error returned from SetProcessLabel("") call, - // can't recover. 
- if labelErr := label.SetProcessLabel(""); labelErr != nil { - logrus.Errorf("Unable to set process label: %q", err) - } - runtime.UnlockOSThread() - return err + return cmd.Start() } // moveConmonToCgroupAndSignal gets a container's cgroupParent and moves the conmon process to that cgroup @@ -1487,7 +1527,7 @@ func readConmonPidFile(pidFile string) (int, error) { } // readConmonPipeData attempts to read a syncInfo struct from the pipe -func readConmonPipeData(pipe *os.File, ociLog string) (int, error) { +func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int, error) { // syncInfo is used to return data from monitor process to daemon type syncInfo struct { Data int `json:"data"` @@ -1504,17 +1544,19 @@ func readConmonPipeData(pipe *os.File, ociLog string) (int, error) { var si *syncInfo rdr := bufio.NewReader(pipe) b, err := rdr.ReadBytes('\n') - if err != nil { + // ignore EOF here, error is returned even when data was read + // if it is no valid json unmarshal will fail below + if err != nil && !errors.Is(err, io.EOF) { ch <- syncStruct{err: err} } if err := json.Unmarshal(b, &si); err != nil { - ch <- syncStruct{err: err} + ch <- syncStruct{err: fmt.Errorf("conmon bytes %q: %w", string(b), err)} return } ch <- syncStruct{si: si} }() - data := -1 + data := -1 //nolint: wastedassign select { case ss := <-ch: if ss.err != nil { @@ -1523,7 +1565,7 @@ func readConmonPipeData(pipe *os.File, ociLog string) (int, error) { if err == nil { var ociErr ociError if err := json.Unmarshal(ociLogData, &ociErr); err == nil { - return -1, getOCIRuntimeError(ociErr.Msg) + return -1, getOCIRuntimeError(runtimeName, ociErr.Msg) } } } @@ -1536,13 +1578,13 @@ func readConmonPipeData(pipe *os.File, ociLog string) (int, error) { if err == nil { var ociErr ociError if err := json.Unmarshal(ociLogData, &ociErr); err == nil { - return ss.si.Data, getOCIRuntimeError(ociErr.Msg) + return ss.si.Data, getOCIRuntimeError(runtimeName, ociErr.Msg) } } } // If we failed to parse the JSON errors, then print the output as it is if ss.si.Message != "" { - return ss.si.Data, getOCIRuntimeError(ss.si.Message) + return ss.si.Data, getOCIRuntimeError(runtimeName, ss.si.Message) } return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed") } diff --git a/vendor/github.com/containers/podman/v3/libpod/oci_missing.go b/vendor/github.com/containers/podman/v4/libpod/oci_missing.go similarity index 97% rename from vendor/github.com/containers/podman/v3/libpod/oci_missing.go rename to vendor/github.com/containers/podman/v4/libpod/oci_missing.go index fcf2ffca84a..86f54c02e73 100644 --- a/vendor/github.com/containers/podman/v3/libpod/oci_missing.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_missing.go @@ -6,7 +6,7 @@ import ( "path/filepath" "sync" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -66,8 +66,8 @@ func (r *MissingRuntime) Path() string { } // CreateContainer is not available as the runtime is missing -func (r *MissingRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) error { - return r.printError() +func (r *MissingRuntime) CreateContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { + return 0, r.printError() } // UpdateContainerStatus is not available as the runtime is missing @@ -153,8 +153,8 @@ func (r *MissingRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (boo } // 
CheckpointContainer is not available as the runtime is missing -func (r *MissingRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error { - return r.printError() +func (r *MissingRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) (int64, error) { + return 0, r.printError() } // CheckConmonRunning is not available as the runtime is missing diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_util.go b/vendor/github.com/containers/podman/v4/libpod/oci_util.go new file mode 100644 index 00000000000..64edfdef2f0 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/oci_util.go @@ -0,0 +1,169 @@ +package libpod + +import ( + "fmt" + "net" + "os" + "regexp" + "strings" + "time" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/libpod/define" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Timeout before declaring that runtime has failed to kill a given +// container +const killContainerTimeout = 5 * time.Second + +// ociError is used to parse the OCI runtime JSON log. It is not part of the +// OCI runtime specifications, it follows what runc does +type ociError struct { + Level string `json:"level,omitempty"` + Time string `json:"time,omitempty"` + Msg string `json:"msg,omitempty"` +} + +// Create systemd unit name for cgroup scopes +func createUnitName(prefix string, name string) string { + return fmt.Sprintf("%s-%s.scope", prefix, name) +} + +// Bind ports to keep them closed on the host +func bindPorts(ports []types.PortMapping) ([]*os.File, error) { + var files []*os.File + sctpWarning := true + for _, port := range ports { + isV6 := net.ParseIP(port.HostIP).To4() == nil + if port.HostIP == "" { + isV6 = false + } + protocols := strings.Split(port.Protocol, ",") + for _, protocol := range protocols { + for i := uint16(0); i < port.Range; i++ { + f, err := bindPort(protocol, port.HostIP, port.HostPort+i, isV6, &sctpWarning) + if err != nil { + return files, err + } + if f != nil { + files = append(files, f) + } + } + } + } + return files, nil +} + +func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool) (*os.File, error) { + var file *os.File + switch protocol { + case "udp": + var ( + addr *net.UDPAddr + err error + ) + if isV6 { + addr, err = net.ResolveUDPAddr("udp6", fmt.Sprintf("[%s]:%d", hostIP, port)) + } else { + addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", hostIP, port)) + } + if err != nil { + return nil, errors.Wrapf(err, "cannot resolve the UDP address") + } + + proto := "udp4" + if isV6 { + proto = "udp6" + } + server, err := net.ListenUDP(proto, addr) + if err != nil { + return nil, errors.Wrapf(err, "cannot listen on the UDP port") + } + file, err = server.File() + if err != nil { + return nil, errors.Wrapf(err, "cannot get file for UDP socket") + } + // close the listener + // note that this does not affect the fd, see the godoc for server.File() + err = server.Close() + if err != nil { + logrus.Warnf("Failed to close connection: %v", err) + } + + case "tcp": + var ( + addr *net.TCPAddr + err error + ) + if isV6 { + addr, err = net.ResolveTCPAddr("tcp6", fmt.Sprintf("[%s]:%d", hostIP, port)) + } else { + addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", hostIP, port)) + } + if err != nil { + return nil, errors.Wrapf(err, "cannot resolve the TCP address") + } + + proto := "tcp4" + if isV6 { + proto = "tcp6" + } + server, err := net.ListenTCP(proto, addr) + if err != nil { + return nil, 
errors.Wrapf(err, "cannot listen on the TCP port") + } + file, err = server.File() + if err != nil { + return nil, errors.Wrapf(err, "cannot get file for TCP socket") + } + // close the listener + // note that this does not affect the fd, see the godoc for server.File() + err = server.Close() + if err != nil { + logrus.Warnf("Failed to close connection: %v", err) + } + + case "sctp": + if *sctpWarning { + logrus.Info("Port reservation for SCTP is not supported") + *sctpWarning = false + } + default: + return nil, fmt.Errorf("unknown protocol %s", protocol) + } + return file, nil +} + +func getOCIRuntimeError(name, runtimeMsg string) error { + includeFullOutput := logrus.GetLevel() == logrus.DebugLevel + + if match := regexp.MustCompile("(?i).*permission denied.*|.*operation not permitted.*").FindString(runtimeMsg); match != "" { + errStr := match + if includeFullOutput { + errStr = runtimeMsg + } + return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s: %s", name, strings.Trim(errStr, "\n")) + } + if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" { + errStr := match + if includeFullOutput { + errStr = runtimeMsg + } + return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s: %s", name, strings.Trim(errStr, "\n")) + } + if match := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`").FindString(runtimeMsg); match != "" { + errStr := match + if includeFullOutput { + errStr = runtimeMsg + } + if strings.HasSuffix(match, "/exec`") { + return errors.Wrapf(define.ErrSetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n")) + } else if strings.HasSuffix(match, "/current`") { + return errors.Wrapf(define.ErrGetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n")) + } + return errors.Wrapf(define.ErrSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n")) + } + return errors.Wrapf(define.ErrOCIRuntime, "%s: %s", name, strings.Trim(runtimeMsg, "\n")) +} diff --git a/vendor/github.com/containers/podman/v3/libpod/options.go b/vendor/github.com/containers/podman/v4/libpod/options.go similarity index 92% rename from vendor/github.com/containers/podman/v3/libpod/options.go rename to vendor/github.com/containers/podman/v4/libpod/options.go index 553af43fd0e..98eb45e76fa 100644 --- a/vendor/github.com/containers/podman/v3/libpod/options.go +++ b/vendor/github.com/containers/podman/v4/libpod/options.go @@ -8,17 +8,17 @@ import ( "syscall" "github.com/containers/buildah/pkg/parse" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - nettypes "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/namespaces" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/runtime-spec/specs-go" @@ -115,19 +115,6 @@ func 
WithStorageConfig(config storage.StoreOptions) RuntimeOption { } } -// WithDefaultTransport sets the default transport for retrieving images. -func WithDefaultTransport(defaultTransport string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return define.ErrRuntimeFinalized - } - - rt.config.Engine.ImageDefaultTransport = defaultTransport - - return nil - } -} - // WithSignaturePolicy specifies the path of a file which decides how trust is // managed for images we've pulled. // If this is not specified, the system default configuration will be used @@ -144,26 +131,6 @@ func WithSignaturePolicy(path string) RuntimeOption { } } -// WithStateType sets the backing state implementation for libpod. -// Please note that information is not portable between backing states. -// As such, if this differs between two libpods running on the same system, -// they will not share containers, and unspecified behavior may occur. -func WithStateType(storeType config.RuntimeStateStore) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return define.ErrRuntimeFinalized - } - - if storeType == config.InvalidStateStore { - return errors.Wrapf(define.ErrInvalidArg, "must provide a valid state store type") - } - - rt.config.Engine.StateType = storeType - - return nil - } -} - // WithOCIRuntime specifies an OCI runtime to use for running containers. func WithOCIRuntime(runtime string) RuntimeOption { return func(rt *Runtime) error { @@ -227,6 +194,19 @@ func WithNetworkCmdPath(path string) RuntimeOption { } } +// WithNetworkBackend specifies the name of the network backend. +func WithNetworkBackend(name string) RuntimeOption { + return func(rt *Runtime) error { + if rt.valid { + return define.ErrRuntimeFinalized + } + + rt.config.Network.NetworkBackend = name + + return nil + } +} + // WithCgroupManager specifies the manager implementation name which is used to // handle cgroups for containers. // Current valid values are "cgroupfs" and "systemd". @@ -237,7 +217,7 @@ func WithCgroupManager(manager string) RuntimeOption { } if manager != config.CgroupfsCgroupsManager && manager != config.SystemdCgroupsManager { - return errors.Wrapf(define.ErrInvalidArg, "CGroup manager must be one of %s and %s", + return errors.Wrapf(define.ErrInvalidArg, "Cgroup manager must be one of %s and %s", config.CgroupfsCgroupsManager, config.SystemdCgroupsManager) } @@ -310,6 +290,17 @@ func WithCDI(devices []string) CtrCreateOption { } } +// WithStorageOpts sets the storage options for the container. +func WithStorageOpts(storageOpts map[string]string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + ctr.config.StorageOpts = storageOpts + return nil + } +} + // WithDefaultMountsFile sets the file to look at for default mounts (mainly // secrets). // Note we are not saving this in the database as it is for testing purposes @@ -428,23 +419,6 @@ func WithVolumePath(volPath string) RuntimeOption { } } -// WithDefaultInfraImage sets the infra image for libpod. -// An infra image is used for inter-container kernel -// namespace sharing within a pod. Typically, an infra -// container is lightweight and is there to reap -// zombie processes within its pid namespace.
-func WithDefaultInfraImage(img string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return define.ErrRuntimeFinalized - } - - rt.config.Engine.InfraImage = img - - return nil - } -} - // WithDefaultInfraCommand sets the command to // run on pause container start up. func WithDefaultInfraCommand(cmd string) RuntimeOption { @@ -459,19 +433,6 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption { } } -// WithDefaultInfraName sets the infra container name for a single pod. -func WithDefaultInfraName(name string) RuntimeOption { - return func(rt *Runtime) error { - if rt.valid { - return define.ErrRuntimeFinalized - } - - rt.config.Engine.InfraImage = name - - return nil - } -} - // WithRenumber instructs libpod to perform a lock renumbering while // initializing. This will handle migrations from early versions of libpod with // file locks to newer versions with SHM locking, as well as changes in the @@ -539,8 +500,6 @@ func WithEventsLogger(logger string) RuntimeOption { } rt.config.Engine.EventsLogger = logger - rt.config.Engine.EventsLogFilePath = filepath.Join(rt.config.Engine.TmpDir, "events", "events.log") - return nil } } @@ -554,6 +513,14 @@ func WithEnableSDNotify() RuntimeOption { } } +// WithSyslog sets a runtime option so we know that we have to log to the syslog as well +func WithSyslog() RuntimeOption { + return func(rt *Runtime) error { + rt.syslog = true + return nil + } +} + // WithRuntimeFlags adds the global runtime flags to the container config func WithRuntimeFlags(runtimeFlags []string) RuntimeOption { return func(rt *Runtime) error { @@ -592,6 +559,30 @@ func WithShmDir(dir string) CtrCreateOption { } } +// WithNoShm tells libpod whether to mount /dev/shm +func WithNoShm(mount bool) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.NoShm = mount + return nil + } +} + +// WithNoShmShare tells libpod whether to share the container's /dev/shm with other containers +func WithNoShmShare(share bool) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.NoShmShare = share + return nil + } +} + // WithSystemd turns on systemd mode in the container func WithSystemd() CtrCreateOption { return func(ctr *Container) error { @@ -599,7 +590,8 @@ func WithSystemd() CtrCreateOption { return define.ErrCtrFinalized } - ctr.config.Systemd = true + t := true + ctr.config.Systemd = &t return nil } } @@ -811,20 +803,6 @@ func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption { } } -// WithExitCommand sets the ExitCommand for the container, appending on the ctr.ID() to the end -func WithExitCommand(exitCommand []string) CtrCreateOption { - return func(ctr *Container) error { - if ctr.valid { - return define.ErrCtrFinalized - } - - ctr.config.ExitCommand = exitCommand - ctr.config.ExitCommand = append(ctr.config.ExitCommand, ctr.ID()) - - return nil - } -} - // WithUTSNSFromPod indicates that the container should join the UTS namespace of // its pod func WithUTSNSFromPod(p *Pod) CtrCreateOption { @@ -958,7 +936,10 @@ func WithUserNSFrom(nsCtr *Container) CtrCreateOption { if err := JSONDeepCopy(nsCtr.IDMappings(), &ctr.config.IDMappings); err != nil { return err } - g := generate.Generator{Config: ctr.config.Spec} + // NewFromSpec() is deprecated according to its comment + // however the recommended replacement just causes a nil map panic + //nolint:staticcheck + g := generate.NewFromSpec(ctr.config.Spec)
g.ClearLinuxUIDMappings() for _, uidmap := range nsCtr.config.IDMappings.UIDMap { @@ -992,7 +973,7 @@ func WithUTSNSFrom(nsCtr *Container) CtrCreateOption { } } -// WithCgroupNSFrom indicates the the container should join the CGroup namespace +// WithCgroupNSFrom indicates that the container should join the Cgroup namespace // of the given container. // If the container has joined a pod, it can only join the namespaces of // containers in the same pod. @@ -1040,7 +1021,7 @@ func WithDependencyCtrs(ctrs []*Container) CtrCreateOption { // namespace with a minimal configuration. // An optional array of port mappings can be provided. // Conflicts with WithNetNSFrom(). -func WithNetNS(portMappings []nettypes.OCICNIPortMapping, exposedPorts map[uint16][]string, postConfigureNetNS bool, netmode string, networks []string) CtrCreateOption { +func WithNetNS(portMappings []nettypes.PortMapping, exposedPorts map[uint16][]string, postConfigureNetNS bool, netmode string, networks map[string]nettypes.PerNetworkOptions) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { return define.ErrCtrFinalized @@ -1052,24 +1033,10 @@ func WithNetNS(portMappings []nettypes.OCICNIPortMapping, exposedPorts map[uint1 ctr.config.PortMappings = portMappings ctr.config.ExposedPorts = exposedPorts - ctr.config.Networks = networks - - return nil - } -} - -// WithStaticIP indicates that the container should request a static IP from -// the CNI plugins. -// It cannot be set unless WithNetNS has already been passed. -// Further, it cannot be set if additional CNI networks to join have been -// specified. -func WithStaticIP(ip net.IP) CtrCreateOption { - return func(ctr *Container) error { - if ctr.valid { - return define.ErrCtrFinalized + if !ctr.config.NetMode.IsBridge() && len(networks) > 0 { + return errors.New("cannot use networks when network mode is not bridge") } - - ctr.config.StaticIP = ip + ctr.config.Networks = networks return nil } @@ -1088,23 +1055,6 @@ func WithNetworkOptions(options map[string][]string) CtrCreateOption { } } -// WithStaticMAC indicates that the container should request a static MAC from -// the CNI plugins. -// It cannot be set unless WithNetNS has already been passed. -// Further, it cannot be set if additional CNI networks to join have been -// specified. -func WithStaticMAC(mac net.HardwareAddr) CtrCreateOption { - return func(ctr *Container) error { - if ctr.valid { - return define.ErrCtrFinalized - } - - ctr.config.StaticMAC = mac - - return nil - } -} - // WithLogDriver sets the log driver for the container func WithLogDriver(driver string) CtrCreateOption { return func(ctr *Container) error { @@ -1158,7 +1108,7 @@ func WithLogTag(tag string) CtrCreateOption { } } -// WithCgroupsMode disables the creation of CGroups for the conmon process. +// WithCgroupsMode disables the creation of Cgroups for the conmon process. func WithCgroupsMode(mode string) CtrCreateOption { return func(ctr *Container) error { if ctr.valid { @@ -1554,20 +1504,6 @@ func WithCreateWorkingDir() CtrCreateOption { } } -// WithNetworkAliases sets network aliases for the container. -// Accepts a map of network name to aliases. -func WithNetworkAliases(aliases map[string][]string) CtrCreateOption { - return func(ctr *Container) error { - if ctr.valid { - return define.ErrCtrFinalized - } - - ctr.config.NetworkAliases = aliases - - return nil - } -} - // Volume Creation Options // WithVolumeName sets the name of the volume.
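Every option added or removed in this file follows the same functional-option shape: a WithX constructor returns a closure that refuses to run once the object is finalized and otherwise mutates the config. A self-contained sketch of that shape, using illustrative names rather than libpod's types:

package main

import (
	"errors"
	"fmt"
)

type container struct {
	valid  bool // set once the container is finalized
	config struct{ LogDriver string }
}

type ctrCreateOption func(*container) error

// withLogDriver performs the same finalized-guard every option above
// does before touching the config.
func withLogDriver(driver string) ctrCreateOption {
	return func(c *container) error {
		if c.valid {
			return errors.New("container is finalized")
		}
		c.config.LogDriver = driver
		return nil
	}
}

func main() {
	c := &container{}
	if err := withLogDriver("k8s-file")(c); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c.config.LogDriver)
}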
@@ -1698,6 +1634,19 @@ func WithVolumeNoChown() VolumeCreateOption { } } +// WithVolumeDisableQuota prevents the volume from being assigned a quota. +func WithVolumeDisableQuota() VolumeCreateOption { + return func(volume *Volume) error { + if volume.valid { + return define.ErrVolumeFinalized + } + + volume.config.DisableQuota = true + + return nil + } +} + // withSetAnon sets a bool notifying libpod that this volume is anonymous and // should be removed when containers using it are removed and volumes are // specified for removal. @@ -1726,7 +1675,7 @@ func WithTimezone(path string) CtrCreateOption { if err != nil { return err } - //We don't want to mount a timezone directory + // We don't want to mount a timezone directory if file.IsDir() { return errors.New("Invalid timezone: is a directory") } @@ -1795,6 +1744,17 @@ func WithPidFile(pidFile string) CtrCreateOption { } } +// WithHostUsers indicates host users to add to /etc/passwd +func WithHostUsers(hostUsers []string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + ctr.config.HostUsers = hostUsers + return nil + } +} + // WithInitCtrType indicates the container is an initcontainer func WithInitCtrType(containerType string) CtrCreateOption { return func(ctr *Container) error { @@ -1821,6 +1781,36 @@ func WithHostDevice(dev []specs.LinuxDevice) CtrCreateOption { } } +// WithSelectedPasswordManagement makes it so that the container either does or does not set up /etc/passwd or /etc/group +func WithSelectedPasswordManagement(passwd *bool) CtrCreateOption { + return func(c *Container) error { + if c.valid { + return define.ErrCtrFinalized + } + c.config.Passwd = passwd + return nil + } +} + +// WithInfraConfig allows for inheritance of compatible config entities from the infra container +func WithInfraConfig(compatibleOptions InfraInherit) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + compatMarshal, err := json.Marshal(compatibleOptions) + if err != nil { + return errors.New("Could not marshal compatible options") + } + + err = json.Unmarshal(compatMarshal, ctr.config) + if err != nil { + return errors.New("Could not unmarshal compatible options into container config") + } + return nil + } +} + // Pod Creation Options // WithPodCreateCommand adds the full command plus arguments of the current @@ -1915,8 +1905,8 @@ func WithPodCgroupParent(path string) PodCreateOption { // WithPodCgroups tells containers in this pod to use the cgroup created for // this pod. // This can still be overridden at the container level by explicitly specifying -// a CGroup parent. -func WithPodCgroups() PodCreateOption { +// a Cgroup parent.
+func WithPodParent() PodCreateOption { return func(pod *Pod) error { if pod.valid { return define.ErrPodFinalized @@ -2073,21 +2063,6 @@ func WithInfraContainer() PodCreateOption { } } -// WithInfraContainerPorts tells the pod to add port bindings to the pause container -func WithInfraContainerPorts(bindings []nettypes.OCICNIPortMapping, infraSpec *specgen.SpecGenerator) []nettypes.PortMapping { - bindingSpec := []nettypes.PortMapping{} - for _, bind := range bindings { - currBind := nettypes.PortMapping{} - currBind.ContainerPort = uint16(bind.ContainerPort) - currBind.HostIP = bind.HostIP - currBind.HostPort = uint16(bind.HostPort) - currBind.Protocol = bind.Protocol - bindingSpec = append(bindingSpec, currBind) - } - infraSpec.PortMappings = bindingSpec - return infraSpec.PortMappings -} - // WithVolatile sets the volatile flag for the container storage. // The option can potentially cause data loss when used on a container that must survive a machine reboot. func WithVolatile() CtrCreateOption { @@ -2101,3 +2076,31 @@ func WithVolatile() CtrCreateOption { return nil } } + +// WithChrootDirs is an additional set of directories that need to be +// treated as root directories. Standard bind mounts will be mounted +// into paths relative to these directories. +func WithChrootDirs(dirs []string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.ChrootDirs = dirs + + return nil + } +} + +// WithPasswdEntry sets the entry to write to the /etc/passwd file. +func WithPasswdEntry(passwdEntry string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.PasswdEntry = passwdEntry + + return nil + } +} diff --git a/vendor/github.com/containers/podman/v3/libpod/plugin/volume_api.go b/vendor/github.com/containers/podman/v4/libpod/plugin/volume_api.go similarity index 99% rename from vendor/github.com/containers/podman/v3/libpod/plugin/volume_api.go rename to vendor/github.com/containers/podman/v4/libpod/plugin/volume_api.go index fafd26dac41..a6d66a03406 100644 --- a/vendor/github.com/containers/podman/v3/libpod/plugin/volume_api.go +++ b/vendor/github.com/containers/podman/v4/libpod/plugin/volume_api.go @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/docker/go-plugins-helpers/sdk" "github.com/docker/go-plugins-helpers/volume" jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/podman/v3/libpod/pod.go b/vendor/github.com/containers/podman/v4/libpod/pod.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/pod.go rename to vendor/github.com/containers/podman/v4/libpod/pod.go index 068a835f63a..237c4290149 100644 --- a/vendor/github.com/containers/podman/v3/libpod/pod.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod.go @@ -1,14 +1,13 @@ package libpod import ( - "context" "fmt" "sort" "strings" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/lock" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/lock" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -44,9 +43,9 @@ type PodConfig struct { // Labels contains labels applied to the pod Labels map[string]string `json:"labels"` - // CgroupParent contains the pod's CGroup parent + // CgroupParent contains the pod's Cgroup 
parent CgroupParent string `json:"cgroupParent"` - // UsePodCgroup indicates whether the pod will create its own CGroup and + // UsePodCgroup indicates whether the pod will create its own Cgroup and // join containers to it. // If true, all containers joined to the pod will use the pod cgroup as // their cgroup parent, and cannot set a different cgroup parent @@ -77,7 +76,7 @@ type PodConfig struct { // podState represents a pod's state type podState struct { - // CgroupPath is the path to the pod's CGroup + // CgroupPath is the path to the pod's Cgroup CgroupPath string `json:"cgroupPath"` // InfraContainerID is the container that holds pod namespace information // Most often an infra container @@ -159,6 +158,15 @@ func (p *Pod) CPUQuota() int64 { return 0 } +// NetworkMode returns the Network mode given by the user ex: pod, private... +func (p *Pod) NetworkMode() string { + infra, err := p.runtime.GetContainer(p.state.InfraContainerID) + if err != nil { + return "" + } + return infra.NetworkMode() +} + // PidMode returns the PID mode given by the user ex: pod, private... func (p *Pod) PidMode() string { infra, err := p.runtime.GetContainer(p.state.InfraContainerID) @@ -237,7 +245,7 @@ func (p *Pod) CreateCommand() []string { return p.config.CreateCommand } -// CgroupParent returns the pod's CGroup parent +// CgroupParent returns the pod's Cgroup parent func (p *Pod) CgroupParent() string { return p.config.CgroupParent } @@ -289,42 +297,16 @@ func (p *Pod) Hostname() string { return p.config.Hostname } -// CgroupPath returns the path to the pod's CGroup +// CgroupPath returns the path to the pod's Cgroup func (p *Pod) CgroupPath() (string, error) { p.lock.Lock() defer p.lock.Unlock() if err := p.updatePod(); err != nil { return "", err } - if p.state.CgroupPath != "" { - return p.state.CgroupPath, nil - } if p.state.InfraContainerID == "" { return "", errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container") } - - id, err := p.infraContainerID() - if err != nil { - return "", err - } - - if id != "" { - ctr, err := p.infraContainer() - if err != nil { - return "", errors.Wrapf(err, "could not get infra") - } - if ctr != nil { - ctr.Start(context.Background(), true) - cgroupPath, err := ctr.CGroupPath() - fmt.Println(cgroupPath) - if err != nil { - return "", errors.Wrapf(err, "could not get container cgroup") - } - p.state.CgroupPath = cgroupPath - p.save() - return cgroupPath, nil - } - } return p.state.CgroupPath, nil } @@ -390,7 +372,7 @@ func (p *Pod) InfraContainerID() (string, error) { return p.infraContainerID() } -// infraContainer is the unlocked versio of InfraContainer which returns the infra container +// infraContainer is the unlocked version of InfraContainer which returns the infra container func (p *Pod) infraContainer() (*Container, error) { id, err := p.infraContainerID() if err != nil { @@ -422,10 +404,6 @@ type PodContainerStats struct { // GetPodStats returns the stats for each of its containers func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerStats) (map[string]*define.ContainerStats, error) { - var ( - ok bool - prevStat *define.ContainerStats - ) p.lock.Lock() defer p.lock.Unlock() @@ -438,10 +416,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerSta } newContainerStats := make(map[string]*define.ContainerStats) for _, c := range containers { - if prevStat, ok = previousContainerStats[c.ID()]; !ok { - prevStat = &define.ContainerStats{} - } - newStats, err := c.GetContainerStats(prevStat) + newStats, 
err := c.GetContainerStats(previousContainerStats[c.ID()]) // If the container wasn't running, don't include it // but also suppress the error if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid { diff --git a/vendor/github.com/containers/podman/v3/libpod/pod_api.go b/vendor/github.com/containers/podman/v4/libpod/pod_api.go similarity index 96% rename from vendor/github.com/containers/podman/v3/libpod/pod_api.go rename to vendor/github.com/containers/podman/v4/libpod/pod_api.go index feb8ff25083..ba30d878ec8 100644 --- a/vendor/github.com/containers/podman/v3/libpod/pod_api.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod_api.go @@ -3,11 +3,11 @@ package libpod import ( "context" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/parallel" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/parallel" + "github.com/containers/podman/v4/pkg/rootless" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -586,13 +586,14 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) { var inspectMounts []define.InspectMount var devices []define.InspectDevice var deviceLimits []define.InspectBlkioThrottleDevice + var infraSecurity []string if p.state.InfraContainerID != "" { infra, err := p.runtime.GetContainer(p.state.InfraContainerID) if err != nil { return nil, err } infraConfig = new(define.InspectPodInfraConfig) - infraConfig.HostNetwork = !infra.config.ContainerNetworkConfig.UseImageHosts + infraConfig.HostNetwork = p.NetworkMode() == "host" infraConfig.StaticIP = infra.config.ContainerNetworkConfig.StaticIP infraConfig.NoManageResolvConf = infra.config.UseImageResolvConf infraConfig.NoManageHosts = infra.config.UseImageHosts @@ -601,8 +602,9 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) { infraConfig.CPUSetCPUs = p.ResourceLim().CPU.Cpus infraConfig.PidNS = p.PidMode() infraConfig.UserNS = p.UserNSMode() - namedVolumes, mounts := infra.sortUserVolumes(infra.config.Spec) - inspectMounts, err = infra.GetInspectMounts(namedVolumes, infra.config.ImageVolumes, mounts) + namedVolumes, mounts := infra.SortUserVolumes(infra.config.Spec) + inspectMounts, err = infra.GetMounts(namedVolumes, infra.config.ImageVolumes, mounts) + infraSecurity = infra.GetSecurityOptions() if err != nil { return nil, err } @@ -637,9 +639,17 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) { infraConfig.HostAdd = make([]string, 0, len(infra.config.HostAdd)) infraConfig.HostAdd = append(infraConfig.HostAdd, infra.config.HostAdd...) } - if len(infra.config.ContainerNetworkConfig.Networks) > 0 { - infraConfig.Networks = make([]string, 0, len(infra.config.ContainerNetworkConfig.Networks)) - infraConfig.Networks = append(infraConfig.Networks, infra.config.ContainerNetworkConfig.Networks...) 
+ + networks, err := infra.networks() + if err != nil { + return nil, err + } + netNames := make([]string, 0, len(networks)) + for name := range networks { + netNames = append(netNames, name) + } + if len(netNames) > 0 { + infraConfig.Networks = netNames } infraConfig.NetworkOptions = infra.config.ContainerNetworkConfig.NetworkOptions infraConfig.PortBindings = makeInspectPortBindings(infra.config.ContainerNetworkConfig.PortMappings, nil) @@ -670,6 +680,7 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) { Devices: devices, BlkioDeviceReadBps: deviceLimits, VolumesFrom: p.VolumesFrom(), + SecurityOpts: infraSecurity, } return &inspectData, nil diff --git a/vendor/github.com/containers/podman/v3/libpod/pod_internal.go b/vendor/github.com/containers/podman/v4/libpod/pod_internal.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/pod_internal.go rename to vendor/github.com/containers/podman/v4/libpod/pod_internal.go index d903b8719ae..41f745e6cbf 100644 --- a/vendor/github.com/containers/podman/v3/libpod/pod_internal.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod_internal.go @@ -6,8 +6,8 @@ import ( "time" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/storage/pkg/stringid" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -71,7 +71,7 @@ func (p *Pod) refresh() error { case config.SystemdCgroupsManager: cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID())) if err != nil { - logrus.Errorf("Creating CGroup for pod %s: %v", p.ID(), err) + logrus.Errorf("Creating Cgroup for pod %s: %v", p.ID(), err) } p.state.CgroupPath = cgroupPath case config.CgroupfsCgroupsManager: diff --git a/vendor/github.com/containers/podman/v3/libpod/pod_status.go b/vendor/github.com/containers/podman/v4/libpod/pod_status.go similarity index 96% rename from vendor/github.com/containers/podman/v3/libpod/pod_status.go rename to vendor/github.com/containers/podman/v4/libpod/pod_status.go index 8552f56814d..aabdfe50efa 100644 --- a/vendor/github.com/containers/podman/v3/libpod/pod_status.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod_status.go @@ -1,6 +1,6 @@ package libpod -import "github.com/containers/podman/v3/libpod/define" +import "github.com/containers/podman/v4/libpod/define" // GetPodStatus determines the status of the pod based on the // statuses of the containers in the pod. 
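The GetPodStats hunk above can drop its explicit ok-check because indexing a Go map with a missing key returns the zero value, which for a pointer type is nil; the callee merely has to tolerate a nil previous stat. A quick standalone illustration (not libpod code):

package main

import "fmt"

type containerStats struct{ CPU float64 }

func main() {
	prev := map[string]*containerStats{}
	stat := prev["missing-id"] // no panic: missing keys yield the zero value
	fmt.Println(stat == nil)   // true
}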
diff --git a/vendor/github.com/containers/podman/v3/libpod/pod_top_linux.go b/vendor/github.com/containers/podman/v4/libpod/pod_top_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v3/libpod/pod_top_linux.go rename to vendor/github.com/containers/podman/v4/libpod/pod_top_linux.go index aee62d832ad..83a07080785 100644 --- a/vendor/github.com/containers/podman/v3/libpod/pod_top_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod_top_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -6,8 +7,8 @@ import ( "strconv" "strings" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/psgo" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/reset.go b/vendor/github.com/containers/podman/v4/libpod/reset.go similarity index 83% rename from vendor/github.com/containers/podman/v3/libpod/reset.go rename to vendor/github.com/containers/podman/v4/libpod/reset.go index 5d9bb0e90d7..28d0ee3f6e8 100644 --- a/vendor/github.com/containers/podman/v3/libpod/reset.go +++ b/vendor/github.com/containers/podman/v4/libpod/reset.go @@ -7,10 +7,11 @@ import ( "path/filepath" "github.com/containers/common/libimage" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -70,6 +71,22 @@ func (r *Runtime) Reset(ctx context.Context) error { } } + // remove all networks + nets, err := r.network.NetworkList() + if err != nil { + return err + } + for _, net := range nets { + // do not delete the default network + if net.Name == r.network.DefaultNetworkName() { + continue + } + // ignore not exists errors because of the TOCTOU problem + if err := r.network.NetworkRemove(net.Name); err != nil && !errors.Is(err, types.ErrNoSuchNetwork) { + logrus.Errorf("Removing network %s: %v", net.Name, err) + } + } + xdgRuntimeDir := filepath.Clean(os.Getenv("XDG_RUNTIME_DIR")) _, prevError := r.store.Shutdown(true) graphRoot := filepath.Clean(r.store.GraphRoot()) diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime.go b/vendor/github.com/containers/podman/v4/libpod/runtime.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/runtime.go rename to vendor/github.com/containers/podman/v4/libpod/runtime.go index 855f3a9f960..78ff8ce944e 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime.go @@ -11,7 +11,6 @@ import ( "regexp" "strconv" "strings" - "sync" "syscall" "time" @@ -19,23 +18,23 @@ import ( "github.com/containers/buildah/pkg/parse" "github.com/containers/common/libimage" + "github.com/containers/common/libnetwork/network" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/pkg/sysregistriesv2" is "github.com/containers/image/v5/storage" 
"github.com/containers/image/v5/types" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/libpod/lock" - "github.com/containers/podman/v3/libpod/network/cni" - nettypes "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/libpod/plugin" - "github.com/containers/podman/v3/libpod/shutdown" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/systemd" - "github.com/containers/podman/v3/pkg/util" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/libpod/lock" + "github.com/containers/podman/v4/libpod/plugin" + "github.com/containers/podman/v4/libpod/shutdown" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/systemd" + "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v4/utils" "github.com/containers/storage" "github.com/containers/storage/pkg/unshare" "github.com/docker/docker/pkg/namesgenerator" @@ -87,6 +86,11 @@ type Runtime struct { libimageEventsShutdown chan bool lockManager lock.Manager + // syslog describes whenever logrus should log to the syslog as well. + // Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go + // This bool is just needed so that we can set it for netavark interface. + syslog bool + // doRenumber indicates that the runtime should perform a lock renumber // during initialization. // Once the runtime has been initialized and returned, this variable is @@ -104,7 +108,6 @@ type Runtime struct { // and remains true until the runtime is shut down (rendering its // storage unusable). When valid is false, the runtime cannot be used. valid bool - lock sync.RWMutex // mechanism to read and write even logs eventer events.Eventer @@ -164,8 +167,7 @@ func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) if err != nil { return nil, err } - conf.CheckCgroupsAndAdjustConfig() - return newRuntimeFromConfig(ctx, conf, options...) + return newRuntimeFromConfig(conf, options...) } // NewRuntimeFromConfig creates a new container runtime using the given @@ -174,10 +176,10 @@ func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) // An error will be returned if the configuration file at the given path does // not exist or cannot be loaded func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) { - return newRuntimeFromConfig(ctx, userConfig, options...) + return newRuntimeFromConfig(userConfig, options...) 
} -func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) { +func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runtime, error) { runtime := new(Runtime) if conf.Engine.OCIRuntime == "" { @@ -208,6 +210,10 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R } if err := shutdown.Register("libpod", func(sig os.Signal) error { + // For `systemctl stop podman.service` support, exit code should be 0 + if sig == syscall.SIGTERM { + os.Exit(0) + } os.Exit(1) return nil }); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists { @@ -218,10 +224,12 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R return nil, errors.Wrapf(err, "error starting shutdown signal handler") } - if err := makeRuntime(ctx, runtime); err != nil { + if err := makeRuntime(runtime); err != nil { return nil, err } + runtime.config.CheckCgroupsAndAdjustConfig() + return runtime, nil } @@ -284,7 +292,7 @@ func getLockManager(runtime *Runtime) (lock.Manager, error) { // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime -func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { +func makeRuntime(runtime *Runtime) (retErr error) { // Find a working conmon binary cPath, err := findConmon(runtime.config.Engine.ConmonPath) if err != nil { @@ -483,20 +491,16 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { } } - netInterface, err := cni.NewCNINetworkInterface(cni.InitConfig{ - CNIConfigDir: runtime.config.Network.NetworkConfigDir, - CNIPluginDirs: runtime.config.Network.CNIPluginDirs, - DefaultNetwork: runtime.config.Network.DefaultNetwork, - DefaultSubnet: runtime.config.Network.DefaultSubnet, - IsMachine: runtime.config.Engine.MachineEnabled, - LockFile: filepath.Join(runtime.config.Network.NetworkConfigDir, "cni.lock"), - }) - if err != nil { - return errors.Wrapf(err, "could not create network interface") + // the store is only setup when we are in the userns so we do the same for the network interface + if !needsUserns { + netBackend, netInterface, err := network.NetworkBackend(runtime.store, runtime.config, runtime.syslog) + if err != nil { + return err + } + runtime.config.Network.NetworkBackend = string(netBackend) + runtime.network = netInterface } - runtime.network = netInterface - // We now need to see if the system has restarted // We check for the presence of a file in our tmp directory to verify this // This check must be locked to prevent races @@ -543,7 +547,13 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { return err } if became { + // Check if the pause process was created. If it was created, then + // move it to its own systemd scope. utils.MovePauseProcessToScope(pausePid) + + // gocritic complains because defer is not run on os.Exit() + // However this is fine because the lock is released anyway when the process exits + //nolint:gocritic os.Exit(ret) } } @@ -592,7 +602,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { runtime.valid = true if runtime.doMigrate { - if err := runtime.migrate(ctx); err != nil { + if err := runtime.migrate(); err != nil { return err } } @@ -709,9 +719,6 @@ func (r *Runtime) TmpDir() (string, error) { // Note that the returned value is not a copy and must hence // only be used in a reading fashion. 
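The shutdown.Register hunk above makes the registered "libpod" handler exit with status 0 on SIGTERM, so `systemctl stop podman.service` is reported as a clean stop, while any other shutdown signal still exits 1. A minimal sketch of that signal-aware exit using only the standard library (the real code routes this through libpod's shutdown package):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)

	fmt.Println("waiting for signal; try: kill -TERM", os.Getpid())
	sig := <-sigs

	// Mirror the hunk above: SIGTERM is the "clean stop" systemd sends,
	// so report success; anything else is treated as an abnormal stop.
	if sig == syscall.SIGTERM {
		os.Exit(0)
	}
	os.Exit(1)
}
```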
func (r *Runtime) GetConfigNoCopy() (*config.Config, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -806,9 +813,6 @@ func (r *Runtime) DeferredShutdown(force bool) { // cleaning up; if force is false, an error will be returned if there are // still containers running or mounted func (r *Runtime) Shutdown(force bool) error { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return define.ErrRuntimeStopped } @@ -1012,9 +1016,6 @@ func (r *Runtime) RunRoot() string { // If the given ID does not correspond to any existing Pod or Container, // ErrNoSuchCtr is returned. func (r *Runtime) GetName(id string) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return "", define.ErrRuntimeStopped } @@ -1075,7 +1076,9 @@ func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) { logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp) } c.TmpDir = dbConfig.LibpodTmp - c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log") + if c.EventsLogFilePath == "" { + c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log") + } } if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" { @@ -1141,7 +1144,7 @@ func (r *Runtime) getVolumePlugin(name string) (*plugin.VolumePlugin, error) { return plugin.GetVolumePlugin(name, pluginPath) } -// GetSecretsStoreageDir returns the directory that the secrets manager should take +// GetSecretsStorageDir returns the directory that the secrets manager should take func (r *Runtime) GetSecretsStorageDir() string { return filepath.Join(r.store.GraphRoot(), "secrets") } @@ -1189,7 +1192,17 @@ func (r *Runtime) Network() nettypes.ContainerNetwork { return r.network } -// Network returns the network interface which is used by the runtime +// GetDefaultNetworkName returns the network interface which is used by the runtime func (r *Runtime) GetDefaultNetworkName() string { return r.config.Network.DefaultNetwork } + +// RemoteURI returns the API server URI +func (r *Runtime) RemoteURI() string { + return r.config.Engine.RemoteURI +} + +// SetRemoteURI records the API server URI +func (r *Runtime) SetRemoteURI(uri string) { + r.config.Engine.RemoteURI = uri +} diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_cstorage.go b/vendor/github.com/containers/podman/v4/libpod/runtime_cstorage.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/runtime_cstorage.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_cstorage.go index 5694967aaaf..1c528e1b82d 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_cstorage.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_cstorage.go @@ -3,7 +3,7 @@ package libpod import ( "time" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/containers/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -21,9 +21,6 @@ type StorageContainer struct { // ListStorageContainers lists all containers visible to c/storage. func (r *Runtime) ListStorageContainers() ([]*StorageContainer, error) { - r.lock.RLock() - defer r.lock.RUnlock() - finalCtrs := []*StorageContainer{} ctrs, err := r.store.Containers() @@ -61,15 +58,6 @@ func (r *Runtime) StorageContainer(idOrName string) (*storage.Container, error) // Accepts ID or full name of container. // If force is set, the container will be unmounted first to ensure removal. 
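The mergeDBConfig hunk above changes EventsLogFilePath from always being overwritten to only being derived from the database's tmp dir when the user has not set one. A sketch of that "explicit value wins" merge, with hypothetical field names in place of the real config structs:

```go
package main

import (
	"fmt"
	"path/filepath"
)

type engineConfig struct {
	TmpDir            string
	EventsLogFilePath string
}

// mergeTmpDir mirrors the hunk: the tmp dir recorded in the database always
// wins, but the events log path is only derived from it when still unset,
// so an explicit user setting survives the merge.
func mergeTmpDir(c *engineConfig, dbTmp string) {
	if c.TmpDir != dbTmp {
		fmt.Printf("overriding tmp dir %q with %q from database\n", c.TmpDir, dbTmp)
	}
	c.TmpDir = dbTmp
	if c.EventsLogFilePath == "" {
		c.EventsLogFilePath = filepath.Join(dbTmp, "events", "events.log")
	}
}

func main() {
	a := &engineConfig{TmpDir: "/tmp/a"}
	mergeTmpDir(a, "/run/libpod")
	fmt.Println(a.EventsLogFilePath) // /run/libpod/events/events.log

	b := &engineConfig{TmpDir: "/run/libpod", EventsLogFilePath: "/var/log/podman-events.log"}
	mergeTmpDir(b, "/run/libpod")
	fmt.Println(b.EventsLogFilePath) // user setting preserved
}
```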
func (r *Runtime) RemoveStorageContainer(idOrName string, force bool) error { - r.lock.Lock() - defer r.lock.Unlock() - - return r.removeStorageContainer(idOrName, force) -} - -// Internal function to remove the container storage without -// locking the runtime. -func (r *Runtime) removeStorageContainer(idOrName string, force bool) error { targetID, err := r.store.Lookup(idOrName) if err != nil { if errors.Cause(err) == storage.ErrLayerUnknown { diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_ctr.go b/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go similarity index 86% rename from vendor/github.com/containers/podman/v3/libpod/runtime_ctr.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go index 2256ba57c20..df7174ac6e4 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_ctr.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go @@ -2,6 +2,7 @@ package libpod import ( "context" + "fmt" "os" "path" "path/filepath" @@ -9,14 +10,16 @@ import ( "time" "github.com/containers/buildah" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/libpod/shutdown" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/domain/entities/reports" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/libpod/shutdown" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/stringid" "github.com/docker/go-units" @@ -39,8 +42,6 @@ type ContainerFilter func(*Container) bool // NewContainer creates a new container from a given OCI config. 
func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, spec *specgen.SpecGenerator, infra bool, options ...CtrCreateOption) (*Container, error) { - r.lock.Lock() - defer r.lock.Unlock() if !r.valid { return nil, define.ErrRuntimeStopped } @@ -78,8 +79,6 @@ func (r *Runtime) PrepareVolumeOnCreateContainer(ctx context.Context, ctr *Conta // RestoreContainer re-creates a container from an imported checkpoint func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (*Container, error) { - r.lock.Lock() - defer r.lock.Unlock() if !r.valid { return nil, define.ErrRuntimeStopped } @@ -156,6 +155,7 @@ func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName s return nil, err } + ctr.newContainerEvent(events.Rename) return ctr, nil } @@ -174,6 +174,8 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf return nil, errors.Wrapf(err, "converting containers.conf ShmSize %s to an int", r.config.Containers.ShmSize) } ctr.config.ShmSize = size + ctr.config.NoShm = false + ctr.config.NoShmShare = false ctr.config.StopSignal = 15 ctr.config.StopTimeout = r.config.Engine.StopTimeout @@ -186,11 +188,14 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf // If the ID is empty a new name for the restored container was requested if ctr.config.ID == "" { ctr.config.ID = stringid.GenerateNonCryptoID() - // Fixup ExitCommand with new ID - ctr.config.ExitCommand[len(ctr.config.ExitCommand)-1] = ctr.config.ID } // Reset the log path to point to the default ctr.config.LogPath = "" + // Later in validate() the check is for nil. JSONDeepCopy sets it to an empty + // object. Resetting it to nil if it was nil before. + if config.StaticMAC == nil { + ctr.config.StaticMAC = nil + } } ctr.config.Spec = rSpec @@ -232,39 +237,47 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options .. 
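The initContainerVariables hunk above resets StaticMAC back to nil when the source config had nil, because a JSON-based deep copy can turn nil into an allocated empty value, and validate() later checks for nil. A self-contained illustration of the underlying encoding/json behavior; the mac type here is a hypothetical unmarshaler that always allocates, not podman's real type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// mac is a hypothetical MAC type whose unmarshaler always allocates, the way
// some text-based unmarshalers do. That is the behavior the hunk above works
// around: after a marshal/unmarshal round trip, nil can come back non-nil.
type mac []byte

func (m *mac) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*m = make(mac, 0) // allocates even for "": nil is lost
	return nil
}

func (m mac) MarshalJSON() ([]byte, error) {
	return json.Marshal(string(m))
}

type config struct {
	StaticMAC mac `json:"staticMAC"`
}

// deepCopy mimics a JSONDeepCopy helper: marshal src, unmarshal into dst.
func deepCopy(src, dst *config) error {
	b, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, dst)
}

func main() {
	src := config{StaticMAC: nil}
	var dst config
	if err := deepCopy(&src, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.StaticMAC == nil) // false: the copy allocated an empty value

	// The fix-up from the hunk above: restore nil when the source was nil,
	// so later validation that checks for nil still behaves correctly.
	if src.StaticMAC == nil {
		dst.StaticMAC = nil
	}
	fmt.Println(dst.StaticMAC == nil) // true
}
```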
func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Container, retErr error) { // normalize the networks to names - // ocicni only knows about cni names so we have to make + // the db backend only knows about network names so we have to make // sure we do not use ids internally if len(ctr.config.Networks) > 0 { - netNames := make([]string, 0, len(ctr.config.Networks)) - for _, nameOrID := range ctr.config.Networks { - netName, err := r.normalizeNetworkName(nameOrID) - if err != nil { - return nil, err + normalizeNetworks := make(map[string]types.PerNetworkOptions, len(ctr.config.Networks)) + // first get the already used interface names so we do not conflict + usedIfNames := make([]string, 0, len(ctr.config.Networks)) + for _, opts := range ctr.config.Networks { + if opts.InterfaceName != "" { + // check that no name is assigned to more than network + if util.StringInSlice(opts.InterfaceName, usedIfNames) { + return nil, errors.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName) + } + usedIfNames = append(usedIfNames, opts.InterfaceName) } - netNames = append(netNames, netName) } - ctr.config.Networks = netNames - } - - // https://github.com/containers/podman/issues/11285 - // normalize the networks aliases to use network names and never ids - if len(ctr.config.NetworkAliases) > 0 { - netAliases := make(map[string][]string, len(ctr.config.NetworkAliases)) - for nameOrID, aliases := range ctr.config.NetworkAliases { + i := 0 + for nameOrID, opts := range ctr.config.Networks { netName, err := r.normalizeNetworkName(nameOrID) if err != nil { return nil, err } - network, err := r.network.NetworkInspect(netName) - if err != nil { - return nil, err - } - if !network.DNSEnabled { - return nil, errors.Wrapf(define.ErrInvalidArg, "cannot set network aliases for network %q because dns is disabled", netName) + // assign interface name if empty + if opts.InterfaceName == "" { + for i < 100000 { + ifName := fmt.Sprintf("eth%d", i) + if !util.StringInSlice(ifName, usedIfNames) { + opts.InterfaceName = ifName + usedIfNames = append(usedIfNames, ifName) + break + } + i++ + } + // if still empty we did not find a free name + if opts.InterfaceName == "" { + return nil, errors.New("failed to find free network interface name") + } } - netAliases[netName] = aliases + + normalizeNetworks[netName] = opts } - ctr.config.NetworkAliases = netAliases + ctr.config.Networks = normalizeNetworks } // Validate the container @@ -326,17 +339,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai } } - if ctr.config.Name == "" { - name, err := r.generateName() - if err != nil { - return nil, err - } - - ctr.config.Name = name - } - - // Check CGroup parent sanity, and set it if it was not set. - // Only if we're actually configuring CGroups. + // Check Cgroup parent sanity, and set it if it was not set. + // Only if we're actually configuring Cgroups. 
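The setupContainer hunk above assigns each network a free ethN interface name, scanning upward from eth0 and rejecting explicit duplicates, with the same 100000 upper bound. A standalone sketch of that allocation loop over a plain name map (real code carries types.PerNetworkOptions instead of strings):

```go
package main

import (
	"errors"
	"fmt"
)

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

// assignInterfaceNames gives every network without an explicit interface
// name the first free ethN, and rejects duplicate explicit names.
func assignInterfaceNames(networks map[string]string) error {
	used := make([]string, 0, len(networks))
	for _, ifName := range networks {
		if ifName == "" {
			continue
		}
		if contains(used, ifName) {
			return fmt.Errorf("interface name %q is already assigned to another network", ifName)
		}
		used = append(used, ifName)
	}

	i := 0
	for netName, ifName := range networks {
		if ifName != "" {
			continue
		}
		assigned := ""
		for i < 100000 { // same upper bound as the hunk above
			candidate := fmt.Sprintf("eth%d", i)
			i++
			if !contains(used, candidate) {
				assigned = candidate
				used = append(used, candidate)
				break
			}
		}
		if assigned == "" {
			return errors.New("failed to find free network interface name")
		}
		networks[netName] = assigned
	}
	return nil
}

func main() {
	nets := map[string]string{"podman": "", "web": "eth0", "db": ""}
	if err := assignInterfaceNames(nets); err != nil {
		panic(err)
	}
	fmt.Println(nets) // e.g. map[db:eth1 podman:eth2 web:eth0]; map order varies
}
```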
if !ctr.config.NoCgroups { ctr.config.CgroupManager = r.config.Engine.CgroupManager switch r.config.Engine.CgroupManager { @@ -347,7 +351,11 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai if err != nil { return nil, errors.Wrapf(err, "error retrieving pod %s cgroup", pod.ID()) } - if podCgroup == "" { + expectPodCgroup, err := ctr.expectPodCgroup() + if err != nil { + return nil, err + } + if expectPodCgroup && podCgroup == "" { return nil, errors.Wrapf(define.ErrInternal, "pod %s cgroup is not set", pod.ID()) } canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(podCgroup) @@ -378,7 +386,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups") } default: - return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager) + return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager) } } @@ -389,7 +397,11 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai if ctr.restoreFromCheckpoint { // Remove information about bind mount // for new container from imported checkpoint - g := generate.Generator{Config: ctr.config.Spec} + + // NewFromSpec() is deprecated according to its comment + // however the recommended replace just causes a nil map panic + //nolint:staticcheck + g := generate.NewFromSpec(ctr.config.Spec) g.RemoveMount("/dev/shm") ctr.config.ShmDir = "" g.RemoveMount("/etc/resolv.conf") @@ -398,7 +410,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai g.RemoveMount("/run/.containerenv") g.RemoveMount("/run/secrets") - // Regenerate CGroup paths so they don't point to the old + // Regenerate Cgroup paths so they don't point to the old // container ID. cgroupPath, err := ctr.getOCICgroupPath() if err != nil { @@ -420,7 +432,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai }() ctr.config.SecretsPath = filepath.Join(ctr.config.StaticDir, "secrets") - err = os.MkdirAll(ctr.config.SecretsPath, 0644) + err = os.MkdirAll(ctr.config.SecretsPath, 0755) if err != nil { return nil, err } @@ -469,7 +481,27 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai if isAnonymous { volOptions = append(volOptions, withSetAnon()) } - newVol, err := r.newVolume(ctx, volOptions...) + + // If volume-opts are set parse and add driver opts. + if len(vol.Options) > 0 { + isDriverOpts := false + driverOpts := make(map[string]string) + for _, opts := range vol.Options { + if strings.HasPrefix(opts, "volume-opt") { + isDriverOpts = true + driverOptKey, driverOptValue, err := util.ParseDriverOpts(opts) + if err != nil { + return nil, err + } + driverOpts[driverOptKey] = driverOptValue + } + } + if isDriverOpts { + parsedOptions := []VolumeCreateOption{WithVolumeOptions(driverOpts)} + volOptions = append(volOptions, parsedOptions...) + } + } + newVol, err := r.newVolume(volOptions...) 
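The volume hunk just above forwards any `volume-opt=key=value` entries from a named volume's mount options to the volume driver. `util.ParseDriverOpts` is podman's helper; the stand-in below only illustrates the expected shape of that parsing, its real signature may differ:

```go
package main

import (
	"fmt"
	"strings"
)

// parseDriverOpt is a stand-in for podman's util.ParseDriverOpts: it accepts
// "volume-opt=key=value" and returns key and value.
func parseDriverOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 3)
	if len(parts) != 3 || parts[0] != "volume-opt" {
		return "", "", fmt.Errorf("cannot parse driver opts from option %q", opt)
	}
	return parts[1], parts[2], nil
}

func main() {
	options := []string{"ro", "volume-opt=type=nfs", "volume-opt=device=server:/export"}

	driverOpts := make(map[string]string)
	for _, opt := range options {
		if !strings.HasPrefix(opt, "volume-opt") {
			continue // plain mount options are not driver options
		}
		k, v, err := parseDriverOpt(opt)
		if err != nil {
			panic(err)
		}
		driverOpts[k] = v
	}
	fmt.Println(driverOpts) // map[device:server:/export type:nfs]
}
```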
if err != nil { return nil, errors.Wrapf(err, "error creating named volume %q", vol.Name) } @@ -481,14 +513,16 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai case define.NoLogging, define.PassthroughLogging: break case define.JournaldLogging: - ctr.initializeJournal(ctx) + if err := ctr.initializeJournal(ctx); err != nil { + return nil, fmt.Errorf("failed to initialize journal: %w", err) + } default: if ctr.config.LogPath == "" { ctr.config.LogPath = filepath.Join(ctr.config.StaticDir, "ctr.log") } } - if !MountExists(ctr.config.Spec.Mounts, "/dev/shm") && ctr.config.ShmDir == "" { + if !MountExists(ctr.config.Spec.Mounts, "/dev/shm") && ctr.config.ShmDir == "" && !ctr.config.NoShm { ctr.config.ShmDir = filepath.Join(ctr.bundlePath(), "shm") if err := os.MkdirAll(ctr.config.ShmDir, 0700); err != nil { if !os.IsExist(err) { @@ -536,8 +570,6 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai // be removed also if and only if the container is the sole user // Otherwise, RemoveContainer will return an error if the container is running func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) error { - r.lock.Lock() - defer r.lock.Unlock() return r.removeContainer(ctx, c, force, removeVolume, false, timeout) } @@ -640,6 +672,20 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo } } + // Check that no other containers depend on the container. + // Only used if not removing a pod - pods guarantee that all + // deps will be evicted at the same time. + if !removePod { + deps, err := r.state.ContainerInUse(c) + if err != nil { + return err + } + if len(deps) != 0 { + depsStr := strings.Join(deps, ", ") + return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr) + } + } + // Check that the container's in a good state to be removed. if c.state.State == define.ContainerStateRunning { time := c.StopTimeout() @@ -662,25 +708,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo } } - // Remove all active exec sessions - if err := c.removeAllExecSessions(); err != nil { - return err - } - - // Check that no other containers depend on the container. - // Only used if not removing a pod - pods guarantee that all - // deps will be evicted at the same time. - if !removePod { - deps, err := r.state.ContainerInUse(c) - if err != nil { - return err - } - if len(deps) != 0 { - depsStr := strings.Join(deps, ", ") - return errors.Wrapf(define.ErrCtrExists, "container %s has dependent containers which must be removed before it: %s", c.ID(), depsStr) - } - } - var cleanupErr error // Clean up network namespace, cgroups, mounts. @@ -700,6 +727,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID()) } + // Remove all active exec sessions + // removing the exec sessions might temporarily unlock the container's lock. 
Using it + // after setting the state to ContainerStateRemoving will prevent that the container is + // restarted + if err := c.removeAllExecSessions(); err != nil { + return err + } + // Stop the container's storage if err := c.teardownStorage(); err != nil { if cleanupErr == nil { @@ -756,6 +791,14 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo continue } if err := runtime.removeVolume(ctx, volume, false, timeout); err != nil && errors.Cause(err) != define.ErrNoSuchVolume { + if errors.Cause(err) == define.ErrVolumeBeingUsed { + // Ignore error, since podman will report original error + volumesFrom, _ := c.volumesFrom() + if len(volumesFrom) > 0 { + logrus.Debugf("Cleanup volume not possible since volume is in use (%s)", v) + continue + } + } logrus.Errorf("Cleanup volume (%s): %v", v, err) } } @@ -772,8 +815,6 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo // If removeVolume is specified, named volumes used by the container will // be removed also if and only if the container is the sole user. func (r *Runtime) EvictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() return r.evictContainer(ctx, idOrName, removeVolume) } @@ -882,7 +923,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol } // Remove container from c/storage - if err := r.removeStorageContainer(id, true); err != nil { + if err := r.RemoveStorageContainer(id, true); err != nil { if cleanupErr == nil { cleanupErr = err } @@ -906,11 +947,60 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol return id, cleanupErr } +// RemoveDepend removes all dependencies for a container. +// If the container is an infra container, the entire pod gets removed. +func (r *Runtime) RemoveDepend(ctx context.Context, rmCtr *Container, force bool, removeVolume bool, timeout *uint) ([]*reports.RmReport, error) { + logrus.Debugf("Removing container %s and all dependent containers", rmCtr.ID()) + rmReports := make([]*reports.RmReport, 0) + if rmCtr.IsInfra() { + pod, err := r.GetPod(rmCtr.PodID()) + if err != nil { + return nil, err + } + logrus.Debugf("Removing pod %s: depends on infra container %s", pod.ID(), rmCtr.ID()) + podContainerIDS, err := pod.AllContainersByID() + if err != nil { + return nil, err + } + if err := r.RemovePod(ctx, pod, true, force, timeout); err != nil { + return nil, err + } + for _, cID := range podContainerIDS { + rmReports = append(rmReports, &reports.RmReport{Id: cID}) + } + return rmReports, nil + } + + deps, err := r.state.ContainerInUse(rmCtr) + if err != nil { + if err == define.ErrCtrRemoved { + return rmReports, nil + } + return rmReports, err + } + for _, cid := range deps { + ctr, err := r.state.Container(cid) + if err != nil { + if err == define.ErrNoSuchCtr { + continue + } + return rmReports, err + } + + reports, err := r.RemoveDepend(ctx, ctr, force, removeVolume, timeout) + if err != nil { + return rmReports, err + } + rmReports = append(rmReports, reports...) 
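The new RemoveDepend above walks the dependency graph: dependents are removed recursively before the target itself, and an infra container escalates to removing the whole pod. A minimal sketch of just the recursive teardown over an in-memory dependency map (hypothetical store, no pod or error handling):

```go
package main

import "fmt"

// store maps a container ID to the IDs of containers that depend on it.
type store map[string][]string

// removeDepend removes everything that depends on id (recursively,
// dependents first), then id itself, returning the removal order.
// The removed set guards against revisiting shared dependents.
func removeDepend(s store, id string, removed map[string]bool) []string {
	if removed[id] {
		return nil
	}
	var order []string
	for _, dep := range s[id] {
		order = append(order, removeDepend(s, dep, removed)...)
	}
	removed[id] = true
	return append(order, id)
}

func main() {
	// c depends on b, b depends on a.
	s := store{"a": {"b"}, "b": {"c"}}
	fmt.Println(removeDepend(s, "a", map[string]bool{})) // [c b a]
}
```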
+ } + + report := reports.RmReport{Id: rmCtr.ID()} + report.Err = r.removeContainer(ctx, rmCtr, force, removeVolume, false, timeout) + return append(rmReports, &report), nil +} + // GetContainer retrieves a container by its ID func (r *Runtime) GetContainer(id string) (*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -920,9 +1010,6 @@ func (r *Runtime) GetContainer(id string) (*Container, error) { // HasContainer checks if a container with the given ID is present func (r *Runtime) HasContainer(id string) (bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return false, define.ErrRuntimeStopped } @@ -933,9 +1020,6 @@ func (r *Runtime) HasContainer(id string) (bool, error) { // LookupContainer looks up a container by its name or a partial ID // If a partial ID is not unique, an error will be returned func (r *Runtime) LookupContainer(idOrName string) (*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -945,9 +1029,6 @@ func (r *Runtime) LookupContainer(idOrName string) (*Container, error) { // LookupContainerId looks up a container id by its name or a partial ID // If a partial ID is not unique, an error will be returned func (r *Runtime) LookupContainerID(idOrName string) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return "", define.ErrRuntimeStopped } @@ -959,13 +1040,6 @@ func (r *Runtime) LookupContainerID(idOrName string) (string, error) { // the output. Multiple filters are handled by ANDing their output, so only // containers matching all filters are returned func (r *Runtime) GetContainers(filters ...ContainerFilter) ([]*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - return r.GetContainersWithoutLock(filters...) -} - -// GetContainersWithoutLock is same as GetContainers but without lock -func (r *Runtime) GetContainersWithoutLock(filters ...ContainerFilter) ([]*Container, error) { if !r.valid { return nil, define.ErrRuntimeStopped } @@ -1043,9 +1117,6 @@ func (r *Runtime) GetLatestContainer() (*Container, error) { // GetExecSessionContainer gets the container that a given exec session ID is // attached to. func (r *Runtime) GetExecSessionContainer(id string) (*Container, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_img.go b/vendor/github.com/containers/podman/v4/libpod/runtime_img.go similarity index 79% rename from vendor/github.com/containers/podman/v3/libpod/runtime_img.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_img.go index 52ac0d4d732..54eadf6b8e5 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_img.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_img.go @@ -10,9 +10,9 @@ import ( "github.com/containers/buildah/imagebuildah" "github.com/containers/common/libimage" "github.com/containers/image/v5/docker/reference" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -25,9 +25,6 @@ import ( // we can use the libpod-internal removal logic. 
func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage.RemoveContainerFunc { return func(imageID string) error { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return define.ErrRuntimeStopped } @@ -36,10 +33,21 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage return err } for _, ctr := range ctrs { - if ctr.config.RootfsImageID == imageID { - var timeout *uint + if ctr.config.RootfsImageID != imageID { + continue + } + var timeout *uint + if ctr.config.IsInfra { + pod, err := r.state.Pod(ctr.config.Pod) + if err != nil { + return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), pod.ID()) + } + if err := r.removePod(ctx, pod, true, true, timeout); err != nil { + return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID()) + } + } else { if err := r.removeContainer(ctx, ctr, true, false, false, timeout); err != nil { - return errors.Wrapf(err, "error removing image %s: container %s using image could not be removed", imageID, ctr.ID()) + return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID()) } } } @@ -85,6 +93,8 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions, if options.Runtime == "" { options.Runtime = r.GetOCIRuntimePath() } + // share the network interface between podman and buildah + options.NetworkInterface = r.network id, ref, err := imagebuildah.BuildDockerfiles(ctx, r.store, options, dockerfiles...) // Write event for build completion r.newImageBuildCompleteEvent(id) diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_migrate.go b/vendor/github.com/containers/podman/v4/libpod/runtime_migrate.go similarity index 92% rename from vendor/github.com/containers/podman/v3/libpod/runtime_migrate.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_migrate.go index 087991e6f48..f56cb83a42d 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_migrate.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_migrate.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package libpod import ( - "context" "fmt" "io/ioutil" "os" @@ -11,9 +11,9 @@ import ( "strconv" "syscall" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -45,7 +45,7 @@ func (r *Runtime) stopPauseProcess() error { return nil } -func (r *Runtime) migrate(ctx context.Context) error { +func (r *Runtime) migrate() error { runningContainers, err := r.GetRunningContainers() if err != nil { return err diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_pod.go b/vendor/github.com/containers/podman/v4/libpod/runtime_pod.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/runtime_pod.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_pod.go index 2389ee6d948..dca0ffc8abb 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_pod.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_pod.go @@ -4,8 +4,8 @@ import ( "context" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + 
"github.com/containers/podman/v4/pkg/util" "github.com/pkg/errors" ) @@ -27,9 +27,6 @@ type PodFilter func(*Pod) bool // being removed // Otherwise, the pod will not be removed if any containers are running func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return define.ErrRuntimeStopped } @@ -50,9 +47,6 @@ func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool, // GetPod retrieves a pod by its ID func (r *Runtime) GetPod(id string) (*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -62,9 +56,6 @@ func (r *Runtime) GetPod(id string) (*Pod, error) { // HasPod checks to see if a pod with the given ID exists func (r *Runtime) HasPod(id string) (bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return false, define.ErrRuntimeStopped } @@ -75,9 +66,6 @@ func (r *Runtime) HasPod(id string) (bool, error) { // LookupPod retrieves a pod by its name or a partial ID // If a partial ID is not unique, an error will be returned func (r *Runtime) LookupPod(idOrName string) (*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -111,9 +99,6 @@ func (r *Runtime) Pods(filters ...PodFilter) ([]*Pod, error) { // GetAllPods retrieves all pods func (r *Runtime) GetAllPods() ([]*Pod, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -148,9 +133,6 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) { pods []string runningPods []*Pod ) - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_pod_linux.go b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go similarity index 69% rename from vendor/github.com/containers/podman/v3/libpod/runtime_pod_linux.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go index 9c6f1539fd6..62ec7df6021 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_pod_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -5,16 +6,17 @@ package libpod import ( "context" "fmt" + "os" "path" "path/filepath" "strings" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -22,9 +24,6 @@ import ( // NewPod makes a new, empty pod func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, options ...PodCreateOption) (_ *Pod, deferredErr error) { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -43,18 +42,6 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option } } - if pod.config.Name == "" { - name, err := r.generateName() - if err != nil { 
- return nil, err - } - pod.config.Name = name - } - - if p.InfraContainerSpec != nil && p.InfraContainerSpec.Hostname == "" { - p.InfraContainerSpec.Hostname = pod.config.Name - } - // Allocate a lock for the pod lock, err := r.lockManager.AllocateLock() if err != nil { @@ -73,51 +60,53 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option pod.valid = true - // Check CGroup parent sanity, and set it if it was not set - switch r.config.Engine.CgroupManager { - case config.CgroupfsCgroupsManager: - canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent) - if canUseCgroup { + // Check Cgroup parent sanity, and set it if it was not set + if r.config.Cgroups() != "disabled" { + switch r.config.Engine.CgroupManager { + case config.CgroupfsCgroupsManager: + canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent) + if canUseCgroup { + if pod.config.CgroupParent == "" { + pod.config.CgroupParent = CgroupfsDefaultCgroupParent + } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { + return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs") + } + // If we are set to use pod cgroups, set the cgroup parent that + // all containers in the pod will share + // No need to create it with cgroupfs - the first container to + // launch should do it for us + if pod.config.UsePodCgroup { + pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID()) + if p.InfraContainerSpec != nil { + p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath + } + } + } + case config.SystemdCgroupsManager: if pod.config.CgroupParent == "" { - pod.config.CgroupParent = CgroupfsDefaultCgroupParent - } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { - return nil, errors.Wrapf(define.ErrInvalidArg, "systemd slice received as cgroup parent when using cgroupfs") + if rootless.IsRootless() { + pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent + } else { + pod.config.CgroupParent = SystemdDefaultCgroupParent + } + } else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { + return nil, errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups") } // If we are set to use pod cgroups, set the cgroup parent that // all containers in the pod will share - // No need to create it with cgroupfs - the first container to - // launch should do it for us if pod.config.UsePodCgroup { - pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID()) + cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID())) + if err != nil { + return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID()) + } + pod.state.CgroupPath = cgroupPath if p.InfraContainerSpec != nil { p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath } } + default: + return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported Cgroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager) } - case config.SystemdCgroupsManager: - if pod.config.CgroupParent == "" { - if rootless.IsRootless() { - pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent - } else { - pod.config.CgroupParent = SystemdDefaultCgroupParent - } - } else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { - return nil, 
errors.Wrapf(define.ErrInvalidArg, "did not receive systemd slice as cgroup parent when using systemd to manage cgroups") - } - // If we are set to use pod cgroups, set the cgroup parent that - // all containers in the pod will share - if pod.config.UsePodCgroup { - cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID())) - if err != nil { - return nil, errors.Wrapf(err, "unable to create pod cgroup for pod %s", pod.ID()) - } - pod.state.CgroupPath = cgroupPath - if p.InfraContainerSpec != nil { - p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath - } - } - default: - return nil, errors.Wrapf(define.ErrInvalidArg, "unsupported CGroup manager: %s - cannot validate cgroup parent", r.config.Engine.CgroupManager) } if pod.config.UsePodCgroup { @@ -131,17 +120,38 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option logrus.Infof("Pod has an infra container, but shares no namespaces") } - if err := r.state.AddPod(pod); err != nil { - return nil, errors.Wrapf(err, "error adding pod to state") + // Unless the user has specified a name, use a randomly generated one. + // Note that name conflicts may occur (see #11735), so we need to loop. + generateName := pod.config.Name == "" + var addPodErr error + for { + if generateName { + name, err := r.generateName() + if err != nil { + return nil, err + } + pod.config.Name = name + } + + if p.InfraContainerSpec != nil && p.InfraContainerSpec.Hostname == "" { + p.InfraContainerSpec.Hostname = pod.config.Name + } + if addPodErr = r.state.AddPod(pod); addPodErr == nil { + return pod, nil + } + if !generateName || (errors.Cause(addPodErr) != define.ErrPodExists && errors.Cause(addPodErr) != define.ErrCtrExists) { + break + } } + if addPodErr != nil { + return nil, errors.Wrapf(addPodErr, "error adding pod to state") + } + return pod, nil } // AddInfra adds the created infra container to the pod state func (r *Runtime) AddInfra(ctx context.Context, pod *Pod, infraCtr *Container) (*Pod, error) { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -155,9 +165,6 @@ func (r *Runtime) AddInfra(ctx context.Context, pod *Pod, infraCtr *Container) ( // SavePod is a helper function to save the pod state from outside of libpod func (r *Runtime) SavePod(pod *Pod) error { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return define.ErrRuntimeStopped } @@ -177,10 +184,9 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, if err != nil { return err } - numCtrs := len(ctrs) - // If the only container in the pod is the pause container, remove the pod and container unconditionally. + // If the only running container in the pod is the pause container, remove the pod and container unconditionally. pauseCtrID := p.state.InfraContainerID if numCtrs == 1 && ctrs[0].ID() == pauseCtrID { removeCtrs = true @@ -193,10 +199,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, // Go through and lock all containers so we can operate on them all at // once. // First loop also checks that we are ready to go ahead and remove. + containersLocked := true for _, ctr := range ctrs { ctrLock := ctr.lock ctrLock.Lock() - defer ctrLock.Unlock() + defer func() { + if containersLocked { + ctrLock.Unlock() + } + }() // If we're force-removing, no need to check status. 
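The NewPod hunk above replaces one-shot name generation with a retry loop: when the pod name was generated (not user-supplied) and AddPod fails with a name conflict (see issue #11735), a fresh name is generated and the add is retried. A compact sketch of that loop against a hypothetical state store:

```go
package main

import (
	"errors"
	"fmt"
)

var errPodExists = errors.New("pod already exists")

type state struct{ pods map[string]bool }

func (s *state) addPod(name string) error {
	if s.pods[name] {
		return errPodExists
	}
	s.pods[name] = true
	return nil
}

// createPod mirrors the hunk: retry only when we generated the name
// ourselves and the failure was a name conflict.
func createPod(s *state, name string, generate func() string) (string, error) {
	generateName := name == ""
	var addErr error
	for {
		if generateName {
			name = generate()
		}
		if addErr = s.addPod(name); addErr == nil {
			return name, nil
		}
		if !generateName || !errors.Is(addErr, errPodExists) {
			break
		}
	}
	return "", fmt.Errorf("adding pod to state: %w", addErr)
}

func main() {
	s := &state{pods: map[string]bool{"gen-0": true}}
	i := 0
	gen := func() string { n := fmt.Sprintf("gen-%d", i); i++; return n }

	name, err := createPod(s, "", gen)
	fmt.Println(name, err) // gen-1 <nil>: first generated name collided, retried

	_, err = createPod(s, "gen-1", gen)
	fmt.Println(err) // explicit name conflicts are not retried
}
```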
if force { @@ -215,12 +226,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, } // We're going to be removing containers. - // If we are CGroupfs cgroup driver, to avoid races, we need to hit - // the pod and conmon CGroups with a PID limit to prevent them from + // If we are Cgroupfs cgroup driver, to avoid races, we need to hit + // the pod and conmon Cgroups with a PID limit to prevent them from // spawning any further processes (particularly cleanup processes) which - // would prevent removing the CGroups. + // would prevent removing the Cgroups. if p.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager { - // Get the conmon CGroup + // Get the conmon Cgroup conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") conmonCgroup, err := cgroups.Load(conmonCgroupPath) if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { @@ -234,7 +245,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, // Don't try if we failed to retrieve the cgroup if err == nil { - if err := conmonCgroup.Update(resLimits); err != nil { + if err := conmonCgroup.Update(resLimits); err != nil && !os.IsNotExist(err) { logrus.Warnf("Error updating pod %s conmon cgroup PID limit: %v", p.ID(), err) } } @@ -264,6 +275,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, } } + // Clear infra container ID before we remove the infra container. + // There is a potential issue if we don't do that, and removal is + // interrupted between RemoveAllContainers() below and the pod's removal + // later - we end up with a reference to a nonexistent infra container. + p.state.InfraContainerID = "" + if err := p.save(); err != nil { + return err + } + // Remove all containers in the pod from the state. if err := r.state.RemovePodContainers(p); err != nil { // If this fails, there isn't much more we can do. 
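The removePod hunk just above clears InfraContainerID and persists the pod record before removing the containers, so an interruption between the two steps cannot leave the pod pointing at a container that no longer exists. A toy illustration of that ordering with a hypothetical persisted record:

```go
package main

import "fmt"

type pod struct {
	infraID string
	saved   string // what is "on disk"
}

func (p *pod) save() { p.saved = p.infraID }

// removeInfra clears the reference and persists it *before* the destructive
// step. If the process dies after save() but before the deletes, the pod
// record simply has no infra ID instead of a dangling one.
func removeInfra(p *pod, containers map[string]bool) {
	p.infraID = ""
	p.save()
	for id := range containers {
		delete(containers, id)
	}
}

func main() {
	ctrs := map[string]bool{"infra123": true}
	p := &pod{infraID: "infra123", saved: "infra123"}
	removeInfra(p, ctrs)
	fmt.Println(p.saved == "", len(ctrs)) // true 0
}
```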
@@ -289,6 +309,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, } } + // let's unlock the containers so if there is any cleanup process, it can terminate its execution + for _, ctr := range ctrs { + ctr.lock.Unlock() + } + containersLocked = false + // Remove pod cgroup, if present if p.state.CgroupPath != "" { logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath) @@ -317,7 +343,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, } } if err == nil { - if err := conmonCgroup.Delete(); err != nil { + if err = conmonCgroup.Delete(); err != nil { if removalErr == nil { removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID()) } else { diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_renumber.go b/vendor/github.com/containers/podman/v4/libpod/runtime_renumber.go similarity index 97% rename from vendor/github.com/containers/podman/v3/libpod/runtime_renumber.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_renumber.go index b19cc921f8c..17e1d97e5ce 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_renumber.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_renumber.go @@ -1,7 +1,7 @@ package libpod import ( - "github.com/containers/podman/v3/libpod/events" + "github.com/containers/podman/v4/libpod/events" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_volume.go b/vendor/github.com/containers/podman/v4/libpod/runtime_volume.go similarity index 89% rename from vendor/github.com/containers/podman/v3/libpod/runtime_volume.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_volume.go index 2b3ad10b437..21bf8aefc28 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_volume.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_volume.go @@ -3,9 +3,9 @@ package libpod import ( "context" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/pkg/domain/entities/reports" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/domain/entities/reports" "github.com/pkg/errors" ) @@ -22,9 +22,6 @@ type VolumeFilter func(*Volume) bool // RemoveVolume removes a volumes func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeout *uint) error { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return define.ErrRuntimeStopped } @@ -41,9 +38,6 @@ func (r *Runtime) RemoveVolume(ctx context.Context, v *Volume, force bool, timeo // GetVolume retrieves a volume given its full name. func (r *Runtime) GetVolume(name string) (*Volume, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -58,9 +52,6 @@ func (r *Runtime) GetVolume(name string) (*Volume, error) { // LookupVolume retrieves a volume by unambiguous partial name. 
func (r *Runtime) LookupVolume(name string) (*Volume, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -75,9 +66,6 @@ func (r *Runtime) LookupVolume(name string) (*Volume, error) { // HasVolume checks to see if a volume with the given name exists func (r *Runtime) HasVolume(name string) (bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return false, define.ErrRuntimeStopped } @@ -90,9 +78,6 @@ func (r *Runtime) HasVolume(name string) (bool, error) { // output. If multiple filters are used, a volume will be returned if // any of the filters are matched func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } @@ -123,9 +108,6 @@ func (r *Runtime) Volumes(filters ...VolumeFilter) ([]*Volume, error) { // GetAllVolumes retrieves all the volumes func (r *Runtime) GetAllVolumes() ([]*Volume, error) { - r.lock.RLock() - defer r.lock.RUnlock() - if !r.valid { return nil, define.ErrRuntimeStopped } diff --git a/vendor/github.com/containers/podman/v3/libpod/runtime_volume_linux.go b/vendor/github.com/containers/podman/v4/libpod/runtime_volume_linux.go similarity index 73% rename from vendor/github.com/containers/podman/v3/libpod/runtime_volume_linux.go rename to vendor/github.com/containers/podman/v4/libpod/runtime_volume_linux.go index b0869352922..f8788e18307 100644 --- a/vendor/github.com/containers/podman/v3/libpod/runtime_volume_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_volume_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -9,9 +10,9 @@ import ( "strings" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - volplugin "github.com/containers/podman/v3/libpod/plugin" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + volplugin "github.com/containers/podman/v4/libpod/plugin" "github.com/containers/storage/drivers/quota" "github.com/containers/storage/pkg/stringid" pluginapi "github.com/docker/go-plugins-helpers/volume" @@ -21,21 +22,18 @@ import ( // NewVolume creates a new empty volume func (r *Runtime) NewVolume(ctx context.Context, options ...VolumeCreateOption) (*Volume, error) { - r.lock.Lock() - defer r.lock.Unlock() - if !r.valid { return nil, define.ErrRuntimeStopped } - return r.newVolume(ctx, options...) + return r.newVolume(options...) } // newVolume creates a new empty volume -func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) (_ *Volume, deferredErr error) { +func (r *Runtime) newVolume(options ...VolumeCreateOption) (_ *Volume, deferredErr error) { volume := newVolume(r) for _, option := range options { if err := option(volume); err != nil { - return nil, errors.Wrapf(err, "error running volume create option") + return nil, errors.Wrapf(err, "running volume create option") } } @@ -50,7 +48,7 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) // Check if volume with given name exists. 
exists, err := r.state.HasVolume(volume.config.Name) if err != nil { - return nil, errors.Wrapf(err, "error checking if volume with name %s exists", volume.config.Name) + return nil, errors.Wrapf(err, "checking if volume with name %s exists", volume.config.Name) } if exists { return nil, errors.Wrapf(define.ErrVolumeExists, "volume with name %s already exists", volume.config.Name) @@ -67,9 +65,15 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) if volume.config.Driver == define.VolumeDriverLocal { logrus.Debugf("Validating options for local driver") // Validate options - for key := range volume.config.Options { - switch key { - case "device", "o", "type", "UID", "GID", "SIZE", "INODES": + for key, val := range volume.config.Options { + switch strings.ToLower(key) { + case "device": + if strings.ToLower(volume.config.Options["type"]) == "bind" { + if _, err := os.Stat(val); err != nil { + return nil, errors.Wrapf(err, "invalid volume option %s for driver 'local'", key) + } + } + case "o", "type", "uid", "gid", "size", "inodes", "noquota": // Do nothing, valid keys default: return nil, errors.Wrapf(define.ErrInvalidArg, "invalid mount option %s for driver 'local'", key) @@ -92,38 +96,43 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) // Create the mountpoint of this volume volPathRoot := filepath.Join(r.config.Engine.VolumePath, volume.config.Name) if err := os.MkdirAll(volPathRoot, 0700); err != nil { - return nil, errors.Wrapf(err, "error creating volume directory %q", volPathRoot) + return nil, errors.Wrapf(err, "creating volume directory %q", volPathRoot) } if err := os.Chown(volPathRoot, volume.config.UID, volume.config.GID); err != nil { - return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID) + return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", volPathRoot, volume.config.UID, volume.config.GID) } fullVolPath := filepath.Join(volPathRoot, "_data") if err := os.MkdirAll(fullVolPath, 0755); err != nil { - return nil, errors.Wrapf(err, "error creating volume directory %q", fullVolPath) + return nil, errors.Wrapf(err, "creating volume directory %q", fullVolPath) } if err := os.Chown(fullVolPath, volume.config.UID, volume.config.GID); err != nil { - return nil, errors.Wrapf(err, "error chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID) + return nil, errors.Wrapf(err, "chowning volume directory %q to %d:%d", fullVolPath, volume.config.UID, volume.config.GID) } if err := LabelVolumePath(fullVolPath); err != nil { return nil, err } - projectQuotaSupported := false - - q, err := quota.NewControl(r.config.Engine.VolumePath) - if err == nil { - projectQuotaSupported = true - } - quota := quota.Quota{} - if volume.config.Size > 0 || volume.config.Inodes > 0 { - if !projectQuotaSupported { - return nil, errors.New("Volume options size and inodes not supported. 
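The newVolume hunk above validates local-driver options case-insensitively and, for type=bind, stats the device path up front so a bad bind source fails at create time rather than at mount time. A standalone sketch of that validation:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// validateLocalOptions mirrors the hunk: keys are matched case-insensitively,
// and "device" is only stat'ed when the volume type is a bind mount.
func validateLocalOptions(opts map[string]string) error {
	for key, val := range opts {
		switch strings.ToLower(key) {
		case "device":
			if strings.ToLower(opts["type"]) == "bind" {
				if _, err := os.Stat(val); err != nil {
					return fmt.Errorf("invalid volume option %s for driver 'local': %w", key, err)
				}
			}
		case "o", "type", "uid", "gid", "size", "inodes", "noquota":
			// valid keys, nothing further to check
		default:
			return fmt.Errorf("invalid mount option %s for driver 'local'", key)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateLocalOptions(map[string]string{"type": "bind", "device": "/no/such/path"}))
	fmt.Println(validateLocalOptions(map[string]string{"TYPE": "tmpfs", "o": "size=10m"}))
	fmt.Println(validateLocalOptions(map[string]string{"bogus": "1"}))
}
```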
Filesystem does not support Project Quota") + if volume.config.DisableQuota { + if volume.config.Size > 0 || volume.config.Inodes > 0 { + return nil, errors.New("volume options size and inodes cannot be used without quota") } - quota.Size = volume.config.Size - quota.Inodes = volume.config.Inodes - } - if projectQuotaSupported { - if err := q.SetQuota(fullVolPath, quota); err != nil { - return nil, errors.Wrapf(err, "failed to set size quota size=%d inodes=%d for volume directory %q", volume.config.Size, volume.config.Inodes, fullVolPath) + } else { + projectQuotaSupported := false + q, err := quota.NewControl(r.config.Engine.VolumePath) + if err == nil { + projectQuotaSupported = true + } + quota := quota.Quota{} + if volume.config.Size > 0 || volume.config.Inodes > 0 { + if !projectQuotaSupported { + return nil, errors.New("volume options size and inodes not supported. Filesystem does not support Project Quota") + } + quota.Size = volume.config.Size + quota.Inodes = volume.config.Inodes + } + if projectQuotaSupported { + if err := q.SetQuota(fullVolPath, quota); err != nil { + return nil, errors.Wrapf(err, "failed to set size quota size=%d inodes=%d for volume directory %q", volume.config.Size, volume.config.Inodes, fullVolPath) + } } } @@ -132,7 +141,7 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) lock, err := r.lockManager.AllocateLock() if err != nil { - return nil, errors.Wrapf(err, "error allocating lock for new volume") + return nil, errors.Wrapf(err, "allocating lock for new volume") } volume.lock = lock volume.config.LockID = volume.lock.ID() @@ -149,7 +158,7 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption) // Add the volume to state if err := r.state.AddVolume(volume); err != nil { - return nil, errors.Wrapf(err, "error adding volume to state") + return nil, errors.Wrapf(err, "adding volume to state") } defer volume.newVolumeEvent(events.Create) return volume, nil @@ -181,7 +190,7 @@ func makeVolumeInPluginIfNotExist(name string, options map[string]string, plugin createReq.Name = name createReq.Options = options if err := plugin.CreateVolume(createReq); err != nil { - return errors.Wrapf(err, "error creating volume %q in plugin %s", name, plugin.Name) + return errors.Wrapf(err, "creating volume %q in plugin %s", name, plugin.Name) } } @@ -225,17 +234,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo continue } - return errors.Wrapf(err, "error removing container %s that depends on volume %s", dep, v.Name()) + return errors.Wrapf(err, "removing container %s that depends on volume %s", dep, v.Name()) } logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name()) - // TODO: do we want to set force here when removing - // containers? - // I'm inclined to say no, in case someone accidentally - // wipes a container they're using... - if err := r.removeContainer(ctx, ctr, false, false, false, timeout); err != nil { - return errors.Wrapf(err, "error removing container %s that depends on volume %s", ctr.ID(), v.Name()) + if err := r.removeContainer(ctx, ctr, force, false, false, timeout); err != nil { + return errors.Wrapf(err, "removing container %s that depends on volume %s", ctr.ID(), v.Name()) } } } @@ -248,7 +253,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo // them. 
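The quota hunk above gates the XFS project-quota setup on a new DisableQuota flag: with quota disabled, size/inodes options are rejected outright; otherwise quota support is probed and limits are applied only when the filesystem supports them. A sketch of that decision tree with the probing stubbed out (the real code uses containers/storage's quota package):

```go
package main

import (
	"errors"
	"fmt"
)

type volumeConfig struct {
	DisableQuota bool
	Size, Inodes uint64
}

// applyQuota mirrors the hunk's decision tree. supported and set stand in
// for quota.NewControl succeeding and quota.SetQuota respectively.
func applyQuota(cfg volumeConfig, supported bool, set func(size, inodes uint64) error) error {
	if cfg.DisableQuota {
		if cfg.Size > 0 || cfg.Inodes > 0 {
			return errors.New("volume options size and inodes cannot be used without quota")
		}
		return nil
	}
	if (cfg.Size > 0 || cfg.Inodes > 0) && !supported {
		return errors.New("volume options size and inodes not supported: filesystem does not support project quota")
	}
	if supported {
		return set(cfg.Size, cfg.Inodes)
	}
	return nil
}

func main() {
	set := func(size, inodes uint64) error {
		fmt.Printf("quota applied: size=%d inodes=%d\n", size, inodes)
		return nil
	}
	fmt.Println(applyQuota(volumeConfig{Size: 1 << 20}, true, set))
	fmt.Println(applyQuota(volumeConfig{DisableQuota: true, Size: 1 << 20}, true, set))
	fmt.Println(applyQuota(volumeConfig{Size: 1 << 20}, false, set))
}
```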
logrus.Errorf("Unmounting volume %s: %v", v.Name(), err) } else { - return errors.Wrapf(err, "error unmounting volume %s", v.Name()) + return errors.Wrapf(err, "unmounting volume %s", v.Name()) } } @@ -292,13 +297,13 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo if removalErr != nil { logrus.Errorf("Removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr) } - return errors.Wrapf(err, "error removing volume %s", v.Name()) + return errors.Wrapf(err, "removing volume %s", v.Name()) } // Free the volume's lock if err := v.lock.Free(); err != nil { if removalErr == nil { - removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name()) + removalErr = errors.Wrapf(err, "freeing lock for volume %s", v.Name()) } else { logrus.Errorf("Freeing lock for volume %q: %v", v.Name(), err) } @@ -308,7 +313,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo // from /var/lib/containers/storage/volumes if err := v.teardownStorage(); err != nil { if removalErr == nil { - removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name()) + removalErr = errors.Wrapf(err, "cleaning up volume storage for %q", v.Name()) } else { logrus.Errorf("Cleaning up volume storage for volume %q: %v", v.Name(), err) } diff --git a/vendor/github.com/containers/podman/v3/libpod/shutdown/handler.go b/vendor/github.com/containers/podman/v4/libpod/shutdown/handler.go similarity index 79% rename from vendor/github.com/containers/podman/v3/libpod/shutdown/handler.go rename to vendor/github.com/containers/podman/v4/libpod/shutdown/handler.go index cca74c3c4a0..9add05c9c97 100644 --- a/vendor/github.com/containers/podman/v3/libpod/shutdown/handler.go +++ b/vendor/github.com/containers/podman/v4/libpod/shutdown/handler.go @@ -5,9 +5,10 @@ import ( "os/signal" "sync" "syscall" + "time" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + logrusImport "github.com/sirupsen/logrus" ) var ( @@ -25,6 +26,7 @@ var ( // Ordering that on-shutdown handlers will be invoked. handlerOrder []string shutdownInhibit sync.RWMutex + logrus = logrusImport.WithField("PID", os.Getpid()) ) // Start begins handling SIGTERM and SIGINT and will run the given on-signal @@ -44,25 +46,31 @@ func Start() error { go func() { select { case <-cancelChan: + logrus.Infof("Received shutdown.Stop(), terminating!") signal.Stop(sigChan) close(sigChan) close(cancelChan) stopped = true return case sig := <-sigChan: - logrus.Infof("Received shutdown signal %v, terminating!", sig) + logrus.Infof("Received shutdown signal %q, terminating!", sig.String()) shutdownInhibit.Lock() handlerLock.Lock() + for _, name := range handlerOrder { handler, ok := handlers[name] if !ok { - logrus.Errorf("Shutdown handler %s definition not found!", name) + logrus.Errorf("Shutdown handler %q definition not found!", name) continue } - logrus.Infof("Invoking shutdown handler %s", name) + + logrus.Infof("Invoking shutdown handler %q", name) + start := time.Now() if err := handler(sig); err != nil { - logrus.Errorf("Running shutdown handler %s: %v", name, err) + logrus.Errorf("Running shutdown handler %q: %v", name, err) } + logrus.Debugf("Completed shutdown handler %q, duration %v", name, + time.Since(start).Round(time.Second)) } handlerLock.Unlock() shutdownInhibit.Unlock() @@ -87,12 +95,12 @@ func Stop() error { return nil } -// Temporarily inhibit signals from shutting down Libpod. +// Inhibit temporarily inhibit signals from shutting down Libpod. 
func Inhibit() { shutdownInhibit.RLock() } -// Stop inhibiting signals from shutting down Libpod. +// Uninhibit stop inhibiting signals from shutting down Libpod. func Uninhibit() { shutdownInhibit.RUnlock() } diff --git a/vendor/github.com/containers/podman/v3/libpod/state.go b/vendor/github.com/containers/podman/v4/libpod/state.go similarity index 96% rename from vendor/github.com/containers/podman/v3/libpod/state.go rename to vendor/github.com/containers/podman/v4/libpod/state.go index 4b711bae921..4710237693e 100644 --- a/vendor/github.com/containers/podman/v3/libpod/state.go +++ b/vendor/github.com/containers/podman/v4/libpod/state.go @@ -1,5 +1,7 @@ package libpod +import "github.com/containers/common/libnetwork/types" + // State is a storage backend for libpod's current state. // A State is only initialized once per instance of libpod. // As such, initialization methods for State implementations may safely assume @@ -99,14 +101,9 @@ type State interface { AllContainers() ([]*Container, error) // Get networks the container is currently connected to. - GetNetworks(ctr *Container) ([]string, error) - // Get network aliases for the given container in the given network. - GetNetworkAliases(ctr *Container, network string) ([]string, error) - // Get all network aliases for the given container. - GetAllNetworkAliases(ctr *Container) (map[string][]string, error) - // Add the container to the given network, adding the given aliases - // (if present). - NetworkConnect(ctr *Container, network string, aliases []string) error + GetNetworks(ctr *Container) (map[string]types.PerNetworkOptions, error) + // Add the container to the given network with the given options + NetworkConnect(ctr *Container, network string, opts types.PerNetworkOptions) error // Remove the container from the given network, removing all aliases for // the container in that network in the process. NetworkDisconnect(ctr *Container, network string) error diff --git a/vendor/github.com/containers/podman/v3/libpod/stats.go b/vendor/github.com/containers/podman/v4/libpod/stats.go similarity index 75% rename from vendor/github.com/containers/podman/v3/libpod/stats.go rename to vendor/github.com/containers/podman/v4/libpod/stats.go index 97515253543..25baa378d4b 100644 --- a/vendor/github.com/containers/podman/v3/libpod/stats.go +++ b/vendor/github.com/containers/podman/v4/libpod/stats.go @@ -1,18 +1,22 @@ +//go:build linux // +build linux package libpod import ( + "math" "strings" "syscall" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/cgroups" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" ) -// GetContainerStats gets the running stats for a given container +// GetContainerStats gets the running stats for a given container. +// The previousStats is used to correctly calculate cpu percentages. You +// should pass nil if there is no previous stat for this container. 
func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*define.ContainerStats, error) { stats := new(define.ContainerStats) stats.ContainerID = c.ID() @@ -34,6 +38,14 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de return stats, define.ErrCtrStateInvalid } + if previousStats == nil { + previousStats = &define.ContainerStats{ + // if we have no prev stats use the container start time as prev time + // otherwise we cannot correctly calculate the CPU percentage + SystemNano: uint64(c.state.StartedTime.UnixNano()), + } + } + cgroupPath, err := c.cGroupPath() if err != nil { return nil, err @@ -65,10 +77,10 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de stats.Duration = cgroupStats.CPU.Usage.Total stats.UpTime = time.Duration(stats.Duration) stats.CPU = calculateCPUPercent(cgroupStats, previousCPU, now, previousStats.SystemNano) - stats.AvgCPU = calculateAvgCPU(stats.CPU, previousStats.AvgCPU, previousStats.DataPoints) - stats.DataPoints = previousStats.DataPoints + 1 + // calc the average cpu usage for the time the container is running + stats.AvgCPU = calculateCPUPercent(cgroupStats, 0, now, uint64(c.state.StartedTime.UnixNano())) stats.MemUsage = cgroupStats.Memory.Usage.Usage - stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit) + stats.MemLimit = c.getMemLimit() stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100 stats.PIDs = 0 if conState == define.ContainerStateRunning || conState == define.ContainerStatePaused { @@ -91,22 +103,29 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de return stats, nil } -// getMemory limit returns the memory limit for a given cgroup -// If the configured memory limit is larger than the total memory on the sys, the -// physical system memory size is returned -func getMemLimit(cgroupLimit uint64) uint64 { +// getMemory limit returns the memory limit for a container +func (c *Container) getMemLimit() uint64 { + memLimit := uint64(math.MaxUint64) + + if c.config.Spec.Linux != nil && c.config.Spec.Linux.Resources != nil && + c.config.Spec.Linux.Resources.Memory != nil && c.config.Spec.Linux.Resources.Memory.Limit != nil { + memLimit = uint64(*c.config.Spec.Linux.Resources.Memory.Limit) + } + si := &syscall.Sysinfo_t{} err := syscall.Sysinfo(si) if err != nil { - return cgroupLimit + return memLimit } //nolint:unconvert physicalLimit := uint64(si.Totalram) - if cgroupLimit > physicalLimit { + + if memLimit <= 0 || memLimit > physicalLimit { return physicalLimit } - return cgroupLimit + + return memLimit } // calculateCPUPercent calculates the cpu usage using the latest measurement in stats. @@ -137,9 +156,3 @@ func calculateBlockIO(stats *cgroups.Metrics) (read uint64, write uint64) { } return } - -// calculateAvgCPU calculates the avg CPU percentage given the previous average and the number of data points. 
-func calculateAvgCPU(statsCPU float64, prevAvg float64, prevData int64) float64 { - avgPer := ((prevAvg * float64(prevData)) + statsCPU) / (float64(prevData) + 1) - return avgPer -} diff --git a/vendor/github.com/containers/podman/v3/libpod/storage.go b/vendor/github.com/containers/podman/v4/libpod/storage.go similarity index 99% rename from vendor/github.com/containers/podman/v3/libpod/storage.go rename to vendor/github.com/containers/podman/v4/libpod/storage.go index 5c265df40d3..a85348729bc 100644 --- a/vendor/github.com/containers/podman/v3/libpod/storage.go +++ b/vendor/github.com/containers/podman/v4/libpod/storage.go @@ -6,7 +6,7 @@ import ( istorage "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" v1 "github.com/opencontainers/image-spec/specs-go/v1" diff --git a/vendor/github.com/containers/podman/v3/libpod/util.go b/vendor/github.com/containers/podman/v4/libpod/util.go similarity index 86% rename from vendor/github.com/containers/podman/v3/libpod/util.go rename to vendor/github.com/containers/podman/v4/libpod/util.go index 8f8303ff22b..1753b4f347a 100644 --- a/vendor/github.com/containers/podman/v3/libpod/util.go +++ b/vendor/github.com/containers/podman/v4/libpod/util.go @@ -13,10 +13,10 @@ import ( "strings" "time" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/utils" "github.com/fsnotify/fsnotify" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" @@ -55,7 +55,11 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e if err := watcher.Add(filepath.Dir(path)); err == nil { inotifyEvents = watcher.Events } - defer watcher.Close() + defer func() { + if err := watcher.Close(); err != nil { + logrus.Errorf("Failed to close fsnotify watcher: %v", err) + } + }() } var timeoutChan <-chan time.Time @@ -148,6 +152,18 @@ func queryPackageVersion(cmdArg ...string) string { cmd := exec.Command(cmdArg[0], cmdArg[1:]...) 
if outp, err := cmd.Output(); err == nil { output = string(outp) + if cmdArg[0] == "/usr/bin/dpkg" { + r := strings.Split(output, ": ") + queryFormat := `${Package}_${Version}_${Architecture}` + cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) + if outp, err := cmd.Output(); err == nil { + output = string(outp) + } + } + } + if cmdArg[0] == "/sbin/apk" { + prefix := cmdArg[len(cmdArg)-1] + " is owned by " + output = strings.Replace(output, prefix, "", 1) } } return strings.Trim(output, "\n") @@ -156,10 +172,11 @@ func queryPackageVersion(cmdArg ...string) string { func packageVersion(program string) string { // program is full path packagers := [][]string{ {"/usr/bin/rpm", "-q", "-f"}, - {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu - {"/usr/bin/pacman", "-Qo"}, // Arch - {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) - {"/usr/bin/equery", "b"}, // Gentoo (slow) + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu + {"/usr/bin/pacman", "-Qo"}, // Arch + {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) + {"/usr/bin/equery", "b"}, // Gentoo (slow) + {"/sbin/apk", "info", "-W"}, // Alpine } for _, cmd := range packagers { @@ -295,19 +312,21 @@ func writeHijackHeader(r *http.Request, conn io.Writer) { } // Convert OCICNI port bindings into Inspect-formatted port bindings. -func makeInspectPortBindings(bindings []types.OCICNIPortMapping, expose map[uint16][]string) map[string][]define.InspectHostPort { +func makeInspectPortBindings(bindings []types.PortMapping, expose map[uint16][]string) map[string][]define.InspectHostPort { portBindings := make(map[string][]define.InspectHostPort) for _, port := range bindings { - key := fmt.Sprintf("%d/%s", port.ContainerPort, port.Protocol) - hostPorts := portBindings[key] - if hostPorts == nil { - hostPorts = []define.InspectHostPort{} + protocols := strings.Split(port.Protocol, ",") + for _, protocol := range protocols { + for i := uint16(0); i < port.Range; i++ { + key := fmt.Sprintf("%d/%s", port.ContainerPort+i, protocol) + hostPorts := portBindings[key] + hostPorts = append(hostPorts, define.InspectHostPort{ + HostIP: port.HostIP, + HostPort: fmt.Sprintf("%d", port.HostPort+i), + }) + portBindings[key] = hostPorts + } } - hostPorts = append(hostPorts, define.InspectHostPort{ - HostIP: port.HostIP, - HostPort: fmt.Sprintf("%d", port.HostPort), - }) - portBindings[key] = hostPorts } // add exposed ports without host port information to match docker for port, protocols := range expose { diff --git a/vendor/github.com/containers/podman/v3/libpod/util_linux.go b/vendor/github.com/containers/podman/v4/libpod/util_linux.go similarity index 93% rename from vendor/github.com/containers/podman/v3/libpod/util_linux.go rename to vendor/github.com/containers/podman/v4/libpod/util_linux.go index e2ea97185a0..fe98056dcf0 100644 --- a/vendor/github.com/containers/podman/v3/libpod/util_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/util_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -7,9 +8,9 @@ import ( "strings" "syscall" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -43,7 +44,7 @@ func getDefaultSystemdCgroup() string { return SystemdDefaultCgroupParent } -// 
makeSystemdCgroup creates a systemd CGroup at the given location. +// makeSystemdCgroup creates a systemd Cgroup at the given location. func makeSystemdCgroup(path string) error { controller, err := cgroups.NewSystemd(getDefaultSystemdCgroup()) if err != nil { diff --git a/vendor/github.com/containers/podman/v3/libpod/volume.go b/vendor/github.com/containers/podman/v4/libpod/volume.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/volume.go rename to vendor/github.com/containers/podman/v4/libpod/volume.go index 90b423f1d58..ab461a37f51 100644 --- a/vendor/github.com/containers/podman/v3/libpod/volume.go +++ b/vendor/github.com/containers/podman/v4/libpod/volume.go @@ -1,13 +1,12 @@ package libpod import ( - "os" - "path/filepath" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/lock" - "github.com/containers/podman/v3/libpod/plugin" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/lock" + "github.com/containers/podman/v4/libpod/plugin" + "github.com/containers/podman/v4/pkg/util" ) // Volume is a libpod named volume. @@ -53,6 +52,9 @@ type VolumeConfig struct { Size uint64 `json:"size"` // Inodes maximum of the volume. Inodes uint64 `json:"inodes"` + // DisableQuota indicates that the volume should completely disable using any + // quota tracking. + DisableQuota bool `json:"disableQuota,omitempty"` } // VolumeState holds the volume's mutable state. @@ -93,14 +95,7 @@ func (v *Volume) Name() string { // Returns the size on disk of volume func (v *Volume) Size() (uint64, error) { - var size uint64 - err := filepath.Walk(v.config.MountPoint, func(path string, info os.FileInfo, err error) error { - if err == nil && !info.IsDir() { - size += (uint64)(info.Size()) - } - return err - }) - return size, err + return util.SizeOfPath(v.config.MountPoint) } // Driver retrieves the volume's driver. 
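Volume.Size() above now delegates to util.SizeOfPath instead of carrying its own filepath.Walk loop. The helper itself is not shown in this patch, so the following is only an illustrative sketch, assuming it sums the sizes of regular files exactly as the removed Walk closure did; sizeOfPath is a hypothetical name, not the vendored function:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// sizeOfPath walks root and sums the sizes of all regular files,
// mirroring what the replaced filepath.Walk closure computed.
// Illustrative stand-in for util.SizeOfPath, not the vendored code.
func sizeOfPath(root string) (uint64, error) {
	var size uint64
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			info, infoErr := d.Info()
			if infoErr != nil {
				return infoErr
			}
			size += uint64(info.Size())
		}
		return nil
	})
	return size, err
}

func main() {
	size, err := sizeOfPath(".")
	if err != nil {
		fmt.Println("walk failed:", err)
		return
	}
	fmt.Printf("total size: %d bytes\n", size)
}
```

The sketch uses filepath.WalkDir (Go 1.16+) because it avoids the extra stat per entry that filepath.Walk performs; whether the vendored helper does the same is not visible in this diff.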
@@ -255,3 +250,16 @@ func (v *Volume) IsDangling() (bool, error) { func (v *Volume) UsesVolumeDriver() bool { return !(v.config.Driver == define.VolumeDriverLocal || v.config.Driver == "") } + +func (v *Volume) Mount() (string, error) { + v.lock.Lock() + defer v.lock.Unlock() + err := v.mount() + return v.config.MountPoint, err +} + +func (v *Volume) Unmount() error { + v.lock.Lock() + defer v.lock.Unlock() + return v.unmount(false) +} diff --git a/vendor/github.com/containers/podman/v3/libpod/volume_inspect.go b/vendor/github.com/containers/podman/v4/libpod/volume_inspect.go similarity index 90% rename from vendor/github.com/containers/podman/v3/libpod/volume_inspect.go rename to vendor/github.com/containers/podman/v4/libpod/volume_inspect.go index c3f51222d17..3d721410bde 100644 --- a/vendor/github.com/containers/podman/v3/libpod/volume_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/volume_inspect.go @@ -1,7 +1,7 @@ package libpod import ( - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -60,6 +60,9 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) { data.UID = v.uid() data.GID = v.gid() data.Anonymous = v.config.IsAnon + data.MountCount = v.state.MountCount + data.NeedsCopyUp = v.state.NeedsCopyUp + data.NeedsChown = v.state.NeedsChown return data, nil } diff --git a/vendor/github.com/containers/podman/v3/libpod/volume_internal.go b/vendor/github.com/containers/podman/v4/libpod/volume_internal.go similarity index 91% rename from vendor/github.com/containers/podman/v3/libpod/volume_internal.go rename to vendor/github.com/containers/podman/v4/libpod/volume_internal.go index f69f1c04491..e0ebb729d73 100644 --- a/vendor/github.com/containers/podman/v3/libpod/volume_internal.go +++ b/vendor/github.com/containers/podman/v4/libpod/volume_internal.go @@ -4,7 +4,7 @@ import ( "os" "path/filepath" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" ) @@ -52,6 +52,9 @@ func (v *Volume) needsMount() bool { if _, ok := v.config.Options["SIZE"]; ok { index++ } + if _, ok := v.config.Options["NOQUOTA"]; ok { + index++ + } // when uid or gid is set there is also the "o" option // set so we have to ignore this one as well if index > 0 { @@ -81,7 +84,7 @@ func (v *Volume) save() error { func (v *Volume) refresh() error { lock, err := v.runtime.lockManager.AllocateAndRetrieveLock(v.config.LockID) if err != nil { - return errors.Wrapf(err, "error acquiring lock %d for volume %s", v.config.LockID, v.Name()) + return errors.Wrapf(err, "acquiring lock %d for volume %s", v.config.LockID, v.Name()) } v.lock = lock diff --git a/vendor/github.com/containers/podman/v3/libpod/volume_internal_linux.go b/vendor/github.com/containers/podman/v4/libpod/volume_internal_linux.go similarity index 80% rename from vendor/github.com/containers/podman/v3/libpod/volume_internal_linux.go rename to vendor/github.com/containers/podman/v4/libpod/volume_internal_linux.go index 45cd2238541..7d7dea9d084 100644 --- a/vendor/github.com/containers/podman/v3/libpod/volume_internal_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/volume_internal_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package libpod @@ -6,8 +7,7 @@ import ( "os/exec" "strings" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + 
"github.com/containers/podman/v4/libpod/define" pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -32,13 +32,6 @@ func (v *Volume) mount() error { return nil } - // We cannot mount 'local' volumes as rootless. - if !v.UsesVolumeDriver() && rootless.IsRootless() { - // This check should only be applied to 'local' driver - // so Volume Drivers must be excluded - return errors.Wrapf(define.ErrRootless, "cannot mount volumes without root privileges") - } - // Update the volume from the DB to get an accurate mount counter. if err := v.update(); err != nil { return err @@ -90,22 +83,27 @@ func (v *Volume) mount() error { // TODO: might want to cache this path in the runtime? mountPath, err := exec.LookPath("mount") if err != nil { - return errors.Wrapf(err, "error locating 'mount' binary") + return errors.Wrapf(err, "locating 'mount' binary") } mountArgs := []string{} if volOptions != "" { mountArgs = append(mountArgs, "-o", volOptions) } - if volType != "" { + switch volType { + case "": + case "bind": + mountArgs = append(mountArgs, "-o", volType) + default: mountArgs = append(mountArgs, "-t", volType) } + mountArgs = append(mountArgs, volDevice, v.config.MountPoint) mountCmd := exec.Command(mountPath, mountArgs...) logrus.Debugf("Running mount command: %s %s", mountPath, strings.Join(mountArgs, " ")) if output, err := mountCmd.CombinedOutput(); err != nil { logrus.Debugf("Mount %v failed with %v", mountCmd, err) - return errors.Wrapf(errors.Errorf(string(output)), "error mounting volume %s", v.Name()) + return errors.Errorf(string(output)) } logrus.Debugf("Mounted volume %s", v.Name()) @@ -139,20 +137,6 @@ func (v *Volume) unmount(force bool) error { return nil } - // We cannot unmount 'local' volumes as rootless. - if !v.UsesVolumeDriver() && rootless.IsRootless() { - // If force is set, just clear the counter and bail without - // error, so we can remove volumes from the state if they are in - // an awkward configuration. - if force { - logrus.Errorf("Volume %s is mounted despite being rootless - state is not sane", v.Name()) - v.state.MountCount = 0 - return v.save() - } - - return errors.Wrapf(define.ErrRootless, "cannot mount or unmount volumes without root privileges") - } - if !force { v.state.MountCount-- } else { @@ -184,7 +168,7 @@ func (v *Volume) unmount(force bool) error { // Ignore EINVAL - the mount no longer exists. return nil } - return errors.Wrapf(err, "error unmounting volume %s", v.Name()) + return errors.Wrapf(err, "unmounting volume %s", v.Name()) } logrus.Debugf("Unmounted volume %s", v.Name()) } diff --git a/vendor/github.com/containers/podman/v3/pkg/annotations/annotations.go b/vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go similarity index 59% rename from vendor/github.com/containers/podman/v3/pkg/annotations/annotations.go rename to vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go index 8badab20d00..a22222f1047 100644 --- a/vendor/github.com/containers/podman/v3/pkg/annotations/annotations.go +++ b/vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go @@ -1,122 +1,122 @@ package annotations const ( - // Annotations carries the received Kubelet annotations + // Annotations carries the received Kubelet annotations. Annotations = "io.kubernetes.cri-o.Annotations" - // ContainerID is the container ID annotation + // ContainerID is the container ID annotation. 
ContainerID = "io.kubernetes.cri-o.ContainerID" - // ContainerName is the container name annotation + // ContainerName is the container name annotation. ContainerName = "io.kubernetes.cri-o.ContainerName" - // ContainerType is the container type (sandbox or container) annotation + // ContainerType is the container type (sandbox or container) annotation. ContainerType = "io.kubernetes.cri-o.ContainerType" - // Created is the container creation time annotation + // Created is the container creation time annotation. Created = "io.kubernetes.cri-o.Created" - // HostName is the container host name annotation + // HostName is the container host name annotation. HostName = "io.kubernetes.cri-o.HostName" - // CgroupParent is the sandbox cgroup parent + // CgroupParent is the sandbox cgroup parent. CgroupParent = "io.kubernetes.cri-o.CgroupParent" - // IP is the container ipv4 or ipv6 address + // IP is the container ipv4 or ipv6 address. IP = "io.kubernetes.cri-o.IP" - // NamespaceOptions store the options for namespaces + // NamespaceOptions store the options for namespaces. NamespaceOptions = "io.kubernetes.cri-o.NamespaceOptions" - // SeccompProfilePath is the node seccomp profile path + // SeccompProfilePath is the node seccomp profile path. SeccompProfilePath = "io.kubernetes.cri-o.SeccompProfilePath" - // Image is the container image ID annotation + // Image is the container image ID annotation. Image = "io.kubernetes.cri-o.Image" - // ImageName is the container image name annotation + // ImageName is the container image name annotation. ImageName = "io.kubernetes.cri-o.ImageName" - // ImageRef is the container image ref annotation + // ImageRef is the container image ref annotation. ImageRef = "io.kubernetes.cri-o.ImageRef" - // KubeName is the kubernetes name annotation + // KubeName is the kubernetes name annotation. KubeName = "io.kubernetes.cri-o.KubeName" - // PortMappings holds the port mappings for the sandbox + // PortMappings holds the port mappings for the sandbox. PortMappings = "io.kubernetes.cri-o.PortMappings" - // Labels are the kubernetes labels annotation + // Labels are the kubernetes labels annotation. Labels = "io.kubernetes.cri-o.Labels" - // LogPath is the container logging path annotation + // LogPath is the container logging path annotation. LogPath = "io.kubernetes.cri-o.LogPath" - // Metadata is the container metadata annotation + // Metadata is the container metadata annotation. Metadata = "io.kubernetes.cri-o.Metadata" - // Name is the pod name annotation + // Name is the pod name annotation. Name = "io.kubernetes.cri-o.Name" - // Namespace is the pod namespace annotation + // Namespace is the pod namespace annotation. Namespace = "io.kubernetes.cri-o.Namespace" - // PrivilegedRuntime is the annotation for the privileged runtime path + // PrivilegedRuntime is the annotation for the privileged runtime path. PrivilegedRuntime = "io.kubernetes.cri-o.PrivilegedRuntime" - // ResolvPath is the resolver configuration path annotation + // ResolvPath is the resolver configuration path annotation. ResolvPath = "io.kubernetes.cri-o.ResolvPath" - // HostnamePath is the path to /etc/hostname to bind mount annotation + // HostnamePath is the path to /etc/hostname to bind mount annotation. HostnamePath = "io.kubernetes.cri-o.HostnamePath" - // SandboxID is the sandbox ID annotation + // SandboxID is the sandbox ID annotation. SandboxID = "io.kubernetes.cri-o.SandboxID" - // SandboxName is the sandbox name annotation + // SandboxName is the sandbox name annotation. 
SandboxName = "io.kubernetes.cri-o.SandboxName" - // ShmPath is the shared memory path annotation + // ShmPath is the shared memory path annotation. ShmPath = "io.kubernetes.cri-o.ShmPath" - // MountPoint is the mount point of the container rootfs + // MountPoint is the mount point of the container rootfs. MountPoint = "io.kubernetes.cri-o.MountPoint" - // RuntimeHandler is the annotation for runtime handler + // RuntimeHandler is the annotation for runtime handler. RuntimeHandler = "io.kubernetes.cri-o.RuntimeHandler" - // TTY is the terminal path annotation + // TTY is the terminal path annotation. TTY = "io.kubernetes.cri-o.TTY" - // Stdin is the stdin annotation + // Stdin is the stdin annotation. Stdin = "io.kubernetes.cri-o.Stdin" - // StdinOnce is the stdin_once annotation + // StdinOnce is the stdin_once annotation. StdinOnce = "io.kubernetes.cri-o.StdinOnce" - // Volumes is the volumes annotation + // Volumes is the volumes annotation. Volumes = "io.kubernetes.cri-o.Volumes" - // HostNetwork indicates whether the host network namespace is used or not + // HostNetwork indicates whether the host network namespace is used or not. HostNetwork = "io.kubernetes.cri-o.HostNetwork" - // CNIResult is the JSON string representation of the Result from CNI + // CNIResult is the JSON string representation of the Result from CNI. CNIResult = "io.kubernetes.cri-o.CNIResult" // ContainerManager is the annotation key for indicating the creator and - // manager of the container + // manager of the container. ContainerManager = "io.container.manager" ) // ContainerType values const ( - // ContainerTypeSandbox represents a pod sandbox container + // ContainerTypeSandbox represents a pod sandbox container. ContainerTypeSandbox = "sandbox" - // ContainerTypeContainer represents a container running within a pod + // ContainerTypeContainer represents a container running within a pod. ContainerTypeContainer = "container" ) // ContainerManagerLibpod indicates that libpod created and manages the -// container +// container. const ContainerManagerLibpod = "libpod" diff --git a/vendor/github.com/containers/podman/v3/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go similarity index 79% rename from vendor/github.com/containers/podman/v3/pkg/checkpoint/crutils/checkpoint_restore_utils.go rename to vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go index 3b77368bbe3..76c868ceea4 100644 --- a/vendor/github.com/containers/podman/v3/pkg/checkpoint/crutils/checkpoint_restore_utils.go +++ b/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go @@ -3,11 +3,13 @@ package crutils import ( "bytes" "io" + "io/ioutil" "os" "os/exec" "path/filepath" metadata "github.com/checkpoint-restore/checkpointctl/lib" + "github.com/checkpoint-restore/go-criu/v5/stats" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" @@ -39,6 +41,36 @@ func CRImportCheckpointWithoutConfig(destination, input string) error { return nil } +// CRImportCheckpointConfigOnly only imports the checkpoint configuration +// from the checkpoint archive (input) into the directory destination. +// Only the files "config.dump" and "spec.dump" are extracted. 
+func CRImportCheckpointConfigOnly(destination, input string) error { + archiveFile, err := os.Open(input) + if err != nil { + return errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input) + } + + defer archiveFile.Close() + options := &archive.TarOptions{ + // Here we only need the files config.dump and spec.dump + ExcludePatterns: []string{ + "ctr.log", + "artifacts", + stats.StatsDump, + metadata.RootFsDiffTar, + metadata.DeletedFilesFile, + metadata.NetworkStatusFile, + metadata.CheckpointDirectory, + metadata.CheckpointVolumesDirectory, + }, + } + if err = archive.Untar(archiveFile, destination, options); err != nil { + return errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input) + } + + return nil +} + // CRRemoveDeletedFiles loads the list of deleted files and if // it exists deletes all files listed. func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) error { @@ -67,13 +99,12 @@ func CRRemoveDeletedFiles(id, baseDirectory, containerRootDirectory string) erro // root file system changes on top of containerRootDirectory func CRApplyRootFsDiffTar(baseDirectory, containerRootDirectory string) error { rootfsDiffPath := filepath.Join(baseDirectory, metadata.RootFsDiffTar) - if _, err := os.Stat(rootfsDiffPath); err != nil { - // Only do this if a rootfs-diff.tar actually exists - return nil - } - + // Only do this if a rootfs-diff.tar actually exists rootfsDiffFile, err := os.Open(rootfsDiffPath) if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } return errors.Wrap(err, "failed to open root file-system diff file") } defer rootfsDiffFile.Close() @@ -200,3 +231,26 @@ func CRRuntimeSupportsPodCheckpointRestore(runtimePath string) bool { out, _ := cmd.CombinedOutput() return bytes.Contains(out, []byte("flag needs an argument")) } + +// CRGetRuntimeFromArchive extracts the checkpoint metadata from the +// given checkpoint archive and returns the runtime used to create +// the given checkpoint archive. 
+func CRGetRuntimeFromArchive(input string) (*string, error) { + dir, err := ioutil.TempDir("", "checkpoint") + if err != nil { + return nil, err + } + defer os.RemoveAll(dir) + + if err := CRImportCheckpointConfigOnly(dir, input); err != nil { + return nil, err + } + + // Load config.dump from temporary directory + ctrConfig := new(metadata.ContainerConfig) + if _, err = metadata.ReadJSONFile(ctrConfig, dir, metadata.ConfigDumpFile); err != nil { + return nil, err + } + + return &ctrConfig.OCIRuntime, nil +} diff --git a/vendor/github.com/containers/podman/v3/pkg/copy/fileinfo.go b/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go similarity index 98% rename from vendor/github.com/containers/podman/v3/pkg/copy/fileinfo.go rename to vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go index fb711311c28..0ccca5b6ee0 100644 --- a/vendor/github.com/containers/podman/v3/pkg/copy/fileinfo.go +++ b/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go @@ -8,7 +8,7 @@ import ( "path/filepath" "strings" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/copy/parse.go b/vendor/github.com/containers/podman/v4/pkg/copy/parse.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/copy/parse.go rename to vendor/github.com/containers/podman/v4/pkg/copy/parse.go diff --git a/vendor/github.com/containers/podman/v3/pkg/criu/criu.go b/vendor/github.com/containers/podman/v4/pkg/criu/criu.go similarity index 55% rename from vendor/github.com/containers/podman/v3/pkg/criu/criu.go rename to vendor/github.com/containers/podman/v4/pkg/criu/criu.go index 2a6805979df..6570159d7ee 100644 --- a/vendor/github.com/containers/podman/v3/pkg/criu/criu.go +++ b/vendor/github.com/containers/podman/v4/pkg/criu/criu.go @@ -1,7 +1,13 @@ +//go:build linux +// +build linux + package criu import ( "github.com/checkpoint-restore/go-criu/v5" + "github.com/checkpoint-restore/go-criu/v5/rpc" + + "google.golang.org/protobuf/proto" ) // MinCriuVersion for Podman at least CRIU 3.11 is required @@ -21,3 +27,25 @@ func CheckForCriu(version int) bool { } return result } + +func GetCriuVestion() (int, error) { + c := criu.MakeCriu() + return c.GetCriuVersion() +} + +func MemTrack() bool { + features, err := criu.MakeCriu().FeatureCheck( + &rpc.CriuFeatures{ + MemTrack: proto.Bool(true), + }, + ) + if err != nil { + return false + } + + if features == nil || features.MemTrack == nil { + return false + } + + return *features.MemTrack +} diff --git a/vendor/github.com/containers/podman/v4/pkg/criu/criu_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/criu/criu_unsupported.go new file mode 100644 index 00000000000..3e3ed9c6c9d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/criu/criu_unsupported.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package criu + +func MemTrack() bool { + return false +} diff --git a/vendor/github.com/containers/podman/v3/pkg/ctime/ctime.go b/vendor/github.com/containers/podman/v4/pkg/ctime/ctime.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/ctime/ctime.go rename to vendor/github.com/containers/podman/v4/pkg/ctime/ctime.go diff --git a/vendor/github.com/containers/podman/v3/pkg/ctime/ctime_linux.go b/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_linux.go similarity index 92% rename from vendor/github.com/containers/podman/v3/pkg/ctime/ctime_linux.go rename to 
vendor/github.com/containers/podman/v4/pkg/ctime/ctime_linux.go index 113693e871e..7eb3caa6dee 100644 --- a/vendor/github.com/containers/podman/v3/pkg/ctime/ctime_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ctime diff --git a/vendor/github.com/containers/podman/v3/pkg/ctime/ctime_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_unsupported.go similarity index 87% rename from vendor/github.com/containers/podman/v3/pkg/ctime/ctime_unsupported.go rename to vendor/github.com/containers/podman/v4/pkg/ctime/ctime_unsupported.go index 32573135380..afee56027a2 100644 --- a/vendor/github.com/containers/podman/v3/pkg/ctime/ctime_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ctime diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/auto-update.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/auto-update.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/container_ps.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go similarity index 97% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/container_ps.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go index 58f231a2fa6..a5562e7c9ab 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/container_ps.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go @@ -5,8 +5,8 @@ import ( "strings" "time" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/ps/define" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/pkg/ps/define" "github.com/pkg/errors" ) @@ -54,7 +54,7 @@ type ListContainer struct { // boolean to be set PodName string // Port mappings - Ports []types.OCICNIPortMapping + Ports []types.PortMapping // Size of the container rootfs. 
Requires the size boolean to be true Size *define.ContainerSize // Time when container started diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go similarity index 86% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/containers.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go index deae85fe125..1db8b9951d4 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go @@ -6,10 +6,10 @@ import ( "os" "time" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" - "github.com/containers/podman/v3/libpod/define" - nettypes "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/specgen" "github.com/containers/storage/pkg/archive" ) @@ -129,6 +129,7 @@ type RestartReport struct { type RmOptions struct { All bool + Depend bool Force bool Ignore bool Latest bool @@ -136,11 +137,6 @@ type RmOptions struct { Volumes bool } -type RmReport struct { - Err error - Id string //nolint -} - type ContainerInspectReport struct { *define.InspectContainerData } @@ -158,6 +154,7 @@ type CommitOptions struct { Message string Pause bool Quiet bool + Squash bool Writer io.Writer } @@ -181,6 +178,7 @@ type ContainerExportOptions struct { type CheckpointOptions struct { All bool Export string + CreateImage string IgnoreRootFS bool IgnoreVolumes bool Keep bool @@ -190,11 +188,15 @@ type CheckpointOptions struct { PreCheckPoint bool WithPrevious bool Compression archive.Compression + PrintStats bool + FileLocks bool } type CheckpointReport struct { - Err error - Id string //nolint + Err error `json:"-"` + Id string `json:"Id"` //nolint + RuntimeDuration int64 `json:"runtime_checkpoint_duration"` + CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` } type RestoreOptions struct { @@ -204,18 +206,23 @@ type RestoreOptions struct { IgnoreStaticIP bool IgnoreStaticMAC bool Import string + CheckpointImage bool Keep bool Latest bool Name string TCPEstablished bool ImportPrevious string - PublishPorts []nettypes.PortMapping + PublishPorts []string Pod string + PrintStats bool + FileLocks bool } type RestoreReport struct { - Err error - Id string //nolint + Err error `json:"-"` + Id string `json:"Id"` //nolint + RuntimeDuration int64 `json:"runtime_restore_duration"` + CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` } type ContainerCreateReport struct { @@ -252,6 +259,8 @@ type ContainerLogsOptions struct { Tail int64 // Show timestamps in the logs. Timestamps bool + // Show different colors in the logs. + Colors bool // Write the stdout to this Writer. StdoutWriter io.Writer // Write the stderr to this Writer.
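The CheckpointReport and RestoreReport types above gain JSON struct tags: json:"-" keeps the non-serializable Err field out of marshaled output, while the new duration and CRIU-statistics fields get stable snake_case keys. A minimal sketch of how those tags behave, using a trimmed stand-in type rather than the vendored one:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// checkpointReport is a trimmed stand-in for the vendored CheckpointReport,
// reduced to three fields to demonstrate the tag behavior.
type checkpointReport struct {
	Err             error  `json:"-"`                           // excluded from JSON entirely
	Id              string `json:"Id"`                          //nolint
	RuntimeDuration int64  `json:"runtime_checkpoint_duration"` // duration reported by the runtime
}

func main() {
	r := checkpointReport{
		Err:             errors.New("visible to Go callers, never to JSON"),
		Id:              "deadbeef",
		RuntimeDuration: 1500,
	}
	out, err := json.Marshal(r)
	if err != nil {
		panic(err)
	}
	// Prints: {"Id":"deadbeef","runtime_checkpoint_duration":1500}
	fmt.Println(string(out))
}
```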
@@ -333,6 +342,7 @@ type ContainerRunOptions struct { Rm bool SigProxy bool Spec *specgen.SpecGenerator + Passwd bool } // ContainerRunReport describes the results of running @@ -375,7 +385,7 @@ type ContainerInitReport struct { Id string //nolint } -//ContainerMountOptions describes the input values for mounting containers +// ContainerMountOptions describes the input values for mounting containers // in the CLI type ContainerMountOptions struct { All bool @@ -422,7 +432,7 @@ type ContainerPortOptions struct { // the CLI to output ports type ContainerPortReport struct { Id string //nolint - Ports []nettypes.OCICNIPortMapping + Ports []nettypes.PortMapping } // ContainerCpOptions describes input options for cp. @@ -458,3 +468,14 @@ type ContainerRenameOptions struct { // NewName is the new name that will be given to the container. NewName string } + +// ContainerCloneOptions contains options for cloning an existing container +type ContainerCloneOptions struct { + ID string + Destroy bool + CreateOpts ContainerCreateOptions + Image string + RawImageName string + Run bool + Force bool +} diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/engine.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go index a8023f7cffc..32faa74afd8 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go @@ -33,13 +33,14 @@ type PodmanConfig struct { *config.Config *pflag.FlagSet - CGroupUsage string // rootless code determines Usage message + CgroupUsage string // rootless code determines Usage message ConmonPath string // --conmon flag will set Engine.ConmonPath CPUProfile string // Hidden: Should CPU profile be taken EngineMode EngineMode // ABI or Tunneling mode Identity string // ssh identity for connecting to server MaxWorks int // maximum number of parallel threads MemoryProfile string // Hidden: Should memory profile be taken + NoOut bool // Don't output to stdout RegistriesConf string // allows for specifying a custom registries.conf Remote bool // Connection to Podman API Service will use RESTful API RuntimePath string // --runtime flag will set Engine.RuntimePath diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_container.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go similarity index 91% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_container.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go index 383e420986a..6b70a34524a 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_container.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go @@ -4,11 +4,11 @@ import ( "context" "io" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/domain/entities/reports" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v4/pkg/specgen" ) type ContainerCopyFunc func() error @@ -19,6
+19,7 @@ type ContainerEngine interface { ContainerAttach(ctx context.Context, nameOrID string, options AttachOptions) error ContainerCheckpoint(ctx context.Context, namesOrIds []string, options CheckpointOptions) ([]*CheckpointReport, error) ContainerCleanup(ctx context.Context, namesOrIds []string, options ContainerCleanupOptions) ([]*ContainerCleanupReport, error) + ContainerClone(ctx context.Context, ctrClone ContainerCloneOptions) (*ContainerCreateReport, error) ContainerCommit(ctx context.Context, nameOrID string, options CommitOptions) (*CommitReport, error) ContainerCopyFromArchive(ctx context.Context, nameOrID, path string, reader io.Reader, options CopyOptions) (ContainerCopyFunc, error) ContainerCopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (ContainerCopyFunc, error) @@ -40,7 +41,7 @@ type ContainerEngine interface { ContainerRename(ctr context.Context, nameOrID string, options ContainerRenameOptions) error ContainerRestart(ctx context.Context, namesOrIds []string, options RestartOptions) ([]*RestartReport, error) ContainerRestore(ctx context.Context, namesOrIds []string, options RestoreOptions) ([]*RestoreReport, error) - ContainerRm(ctx context.Context, namesOrIds []string, options RmOptions) ([]*RmReport, error) + ContainerRm(ctx context.Context, namesOrIds []string, options RmOptions) ([]*reports.RmReport, error) ContainerRun(ctx context.Context, opts ContainerRunOptions) (*ContainerRunReport, error) ContainerRunlabel(ctx context.Context, label string, image string, args []string, opts ContainerRunlabelOptions) error ContainerStart(ctx context.Context, namesOrIds []string, options ContainerStartOptions) ([]*ContainerStartReport, error) @@ -67,8 +68,8 @@ type ContainerEngine interface { NetworkPrune(ctx context.Context, options NetworkPruneOptions) ([]*NetworkPruneReport, error) NetworkReload(ctx context.Context, names []string, options NetworkReloadOptions) ([]*NetworkReloadReport, error) NetworkRm(ctx context.Context, namesOrIds []string, options NetworkRmOptions) ([]*NetworkRmReport, error) - PlayKube(ctx context.Context, path string, opts PlayKubeOptions) (*PlayKubeReport, error) - PlayKubeDown(ctx context.Context, path string, opts PlayKubeDownOptions) (*PlayKubeReport, error) + PlayKube(ctx context.Context, body io.Reader, opts PlayKubeOptions) (*PlayKubeReport, error) + PlayKubeDown(ctx context.Context, body io.Reader, opts PlayKubeDownOptions) (*PlayKubeReport, error) PodCreate(ctx context.Context, specg PodSpec) (*PodCreateReport, error) PodExists(ctx context.Context, nameOrID string) (*BoolReport, error) PodInspect(ctx context.Context, options PodInspectOptions) (*PodInspectReport, error) @@ -98,6 +99,8 @@ type ContainerEngine interface { VolumeMounted(ctx context.Context, namesOrID string) (*BoolReport, error) VolumeInspect(ctx context.Context, namesOrIds []string, opts InspectOptions) ([]*VolumeInspectReport, []error, error) VolumeList(ctx context.Context, opts VolumeListOptions) ([]*VolumeListReport, error) + VolumeMount(ctx context.Context, namesOrIds []string) ([]*VolumeMountReport, error) VolumePrune(ctx context.Context, options VolumePruneOptions) ([]*reports.PruneReport, error) VolumeRm(ctx context.Context, namesOrIds []string, opts VolumeRmOptions) ([]*VolumeRmReport, error) + VolumeUnmount(ctx context.Context, namesOrIds []string) ([]*VolumeUnmountReport, error) } diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_image.go 
b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go similarity index 80% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_image.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go index b0f9ae40840..5011d82aa79 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_image.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go @@ -4,7 +4,7 @@ import ( "context" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/pkg/domain/entities/reports" + "github.com/containers/podman/v4/pkg/domain/entities/reports" ) type ImageEngine interface { @@ -27,15 +27,16 @@ type ImageEngine interface { ShowTrust(ctx context.Context, args []string, options ShowTrustOptions) (*ShowTrustReport, error) Shutdown(ctx context.Context) Tag(ctx context.Context, nameOrID string, tags []string, options ImageTagOptions) error + Transfer(ctx context.Context, source ImageScpOptions, dest ImageScpOptions, parentFlags []string) error Tree(ctx context.Context, nameOrID string, options ImageTreeOptions) (*ImageTreeReport, error) Unmount(ctx context.Context, images []string, options ImageUnmountOptions) ([]*ImageUnmountReport, error) Untag(ctx context.Context, nameOrID string, tags []string, options ImageUntagOptions) error - ManifestCreate(ctx context.Context, names, images []string, opts ManifestCreateOptions) (string, error) + ManifestCreate(ctx context.Context, name string, images []string, opts ManifestCreateOptions) (string, error) ManifestExists(ctx context.Context, name string) (*BoolReport, error) ManifestInspect(ctx context.Context, name string) ([]byte, error) - ManifestAdd(ctx context.Context, opts ManifestAddOptions) (string, error) - ManifestAnnotate(ctx context.Context, names []string, opts ManifestAnnotateOptions) (string, error) - ManifestRemove(ctx context.Context, names []string) (string, error) + ManifestAdd(ctx context.Context, listName string, imageNames []string, opts ManifestAddOptions) (string, error) + ManifestAnnotate(ctx context.Context, names, image string, opts ManifestAnnotateOptions) (string, error) + ManifestRemoveDigest(ctx context.Context, names, image string) (string, error) ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error) ManifestPush(ctx context.Context, name, destination string, imagePushOpts ImagePushOptions) (string, error) Sign(ctx context.Context, names []string, options SignOptions) (*SignReport, error) diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_system.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_system.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/engine_system.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_system.go diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/events.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/events.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go index 73a375b9414..d8ba0f1d3ea 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/events.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go @@ -4,7 +4,7 @@ import ( "strconv" "time" - libpodEvents "github.com/containers/podman/v3/libpod/events" + libpodEvents 
"github.com/containers/podman/v4/libpod/events" dockerEvents "github.com/docker/docker/api/types/events" ) @@ -42,7 +42,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event { Image: image, Name: name, Status: status, - Time: time.Unix(e.Time, e.TimeNano), + Time: time.Unix(0, e.TimeNano), Type: t, Details: libpodEvents.Details{ Attributes: details, diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/filters.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/filters.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/generate.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go similarity index 69% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/generate.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go index 7809c524142..73dd64ecd02 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/generate.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go @@ -10,6 +10,10 @@ type GenerateSystemdOptions struct { New bool // RestartPolicy - systemd restart policy. RestartPolicy *string + // RestartSec - systemd service restartsec. Configures the time to sleep before restarting a service. + RestartSec *uint + // StartTimeout - time when starting the container. + StartTimeout *uint // StopTimeout - time when stopping the container. StopTimeout *uint // ContainerPrefix - systemd unit name prefix for containers @@ -20,6 +24,14 @@ type GenerateSystemdOptions struct { Separator string // NoHeader - skip header generation NoHeader bool + // TemplateUnitFile - make use of %i and %I to differentiate between the different instances of the unit + TemplateUnitFile bool + // Wants - systemd wants list for the container or pods + Wants []string + // After - systemd after list for the container or pods + After []string + // Requires - systemd requires list for the container or pods + Requires []string } // GenerateSystemdReport diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/healthcheck.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/healthcheck.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/images.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go similarity index 90% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/images.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go index 2822b1ad71f..7081c5d25a5 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/images.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go @@ -7,8 +7,8 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" - "github.com/containers/podman/v3/pkg/inspect" - "github.com/containers/podman/v3/pkg/trust" + "github.com/containers/podman/v4/pkg/inspect" + "github.com/containers/podman/v4/pkg/trust" "github.com/docker/docker/api/types/container" "github.com/opencontainers/go-digest" v1 
"github.com/opencontainers/image-spec/specs-go/v1" @@ -50,6 +50,7 @@ func (i *Image) Id() string { // nolint return i.ID } +// swagger:model LibpodImageSummary type ImageSummary struct { ID string `json:"Id"` ParentId string // nolint @@ -89,11 +90,13 @@ type ImageRemoveOptions struct { All bool // Foce will force image removal including containers using the images. Force bool + // Ignore if a specified image does not exist and do not throw an error. + Ignore bool // Confirms if given name is a manifest list and removes it, otherwise returns error. LookupManifest bool } -// ImageRemoveResponse is the response for removing one or more image(s) from storage +// ImageRemoveReport is the response for removing one or more image(s) from storage // and images what was untagged vs actually removed. type ImageRemoveReport struct { // Deleted images. @@ -207,6 +210,8 @@ type ImagePushOptions struct { SkipTLSVerify types.OptionalBool // Progress to get progress notifications Progress chan types.ProgressProperties + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat string } // ImageSearchOptions are the arguments for searching images. @@ -218,8 +223,6 @@ type ImageSearchOptions struct { Filters []string // Limit the number of results. Limit int - // NoTrunc will not truncate the output. - NoTrunc bool // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify types.OptionalBool // ListTags search the available tags of the repository @@ -276,6 +279,7 @@ type ImageLoadReport struct { type ImageImportOptions struct { Architecture string + Variant string Changes []string Message string OS string @@ -310,26 +314,28 @@ type ImageSaveOptions struct { Quiet bool } -// ImageScpOptions provide options for securely copying images to podman remote +// ImageScpOptions provide options for securely copying images to and from a remote host type ImageScpOptions struct { - // SoureImageName is the image the user is providing to load on a remote machine - SourceImageName string - // Tag allows for a new image to be created under the given name - Tag string - // ToRemote specifies that we are loading to the remote host - ToRemote bool - // FromRemote specifies that we are loading from the remote host - FromRemote bool + // Remote determines if this entity is operating on a remote machine + Remote bool `json:"remote,omitempty"` + // File is the input/output file for the save and load Operation + File string `json:"file,omitempty"` + // Quiet Determines if the save and load operation will be done quietly + Quiet bool `json:"quiet,omitempty"` + // Image is the image the user is providing to save and load + Image string `json:"image,omitempty"` + // User is used in conjunction with Transfer to determine if a valid user was given to save from/load into + User string `json:"user,omitempty"` +} + +// ImageScpConnections provides the ssh related information used in remote image transfer +type ImageScpConnections struct { // Connections holds the raw string values for connections (ssh or unix) Connections []string // URI contains the ssh connection URLs to be used by the client URI []*url.URL - // Iden contains ssh identity keys to be used by the client - Iden []string - // Save Options used for first half of the scp operation - Save ImageSaveOptions - // Load options used for the second half of the scp operation - Load ImageLoadOptions + // Identities contains ssh identity keys to be used by the client + Identities []string } // ImageTreeOptions provides options for 
ImageEngine.Tree() @@ -370,6 +376,7 @@ type SignOptions struct { Directory string SignBy string CertDir string + Authfile string All bool } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go new file mode 100644 index 00000000000..81f3e837bb8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go @@ -0,0 +1,80 @@ +package entities + +import "github.com/containers/image/v5/types" + +// ManifestCreateOptions provides model for creating manifest +type ManifestCreateOptions struct { + All bool `schema:"all"` +} + +// ManifestAddOptions provides model for adding digests to manifest list +// +// swagger:model +type ManifestAddOptions struct { + ManifestAnnotateOptions + // True when operating on a list to include all images + All bool `json:"all" schema:"all"` + // authfile to use when pushing manifest list + Authfile string `json:"-" schema:"-"` + // Home directory for certificates when pushing a manifest list + CertDir string `json:"-" schema:"-"` + // Password to authenticate to registry when pushing manifest list + Password string `json:"-" schema:"-"` + // Should TLS registry certificate be verified? + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` + // Username to authenticate to registry when pushing manifest list + Username string `json:"-" schema:"-"` + // Images is an optional list of images to add to manifest list + Images []string `json:"images" schema:"images"` +} + +// ManifestAnnotateOptions provides model for annotating manifest list +type ManifestAnnotateOptions struct { + // Annotation to add to manifest list + Annotation []string `json:"annotation" schema:"annotation"` + // Arch overrides the architecture for the image + Arch string `json:"arch" schema:"arch"` + // Feature list for the image + Features []string `json:"features" schema:"features"` + // OS overrides the operating system for the image + OS string `json:"os" schema:"os"` + // OS features for the image + OSFeatures []string `json:"os_features" schema:"os_features"` + // OSVersion overrides the operating system version for the image + OSVersion string `json:"os_version" schema:"os_version"` + // Variant for the image + Variant string `json:"variant" schema:"variant"` +} + +// ManifestModifyOptions provides the model for mutating a manifest +// +// swagger 2.0 does not support oneOf for schema validation. +// +// Operation "update" uses all fields. +// Operation "remove" uses fields: Operation and Images +// Operation "annotate" uses fields: Operation and Annotations +// +// swagger:model
+type ManifestModifyOptions struct { + Operation string `json:"operation" schema:"operation"` // Valid values: update, remove, annotate + ManifestAddOptions + ManifestRemoveOptions +} + +// ManifestRemoveOptions provides the model for removing digests from a manifest +// +// swagger:model +type ManifestRemoveOptions struct { +} + +// ManifestModifyReport provides the model for removed digests and changed manifest +// +// swagger:model +type ManifestModifyReport struct { + // Manifest List ID + ID string `json:"Id"` + // Images removed from manifest list, otherwise not provided.
+// ManifestModifyReport provides the model for removed digests and changed manifest +// +// swagger:model +type ManifestModifyReport struct { + // Manifest List ID + ID string `json:"Id"` + // Images removed from the manifest list, otherwise not provided. + Images []string `json:"images,omitempty" schema:"images"` + // Errors associated with operation + Errors []error `json:"errors,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/network.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go similarity index 86% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/network.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go index d7389a699f9..0f901c7f14d 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/network.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go @@ -2,6 +2,8 @@ package entities import ( "net" + + "github.com/containers/common/libnetwork/types" ) // NetworkListOptions describes options for listing networks in cli @@ -20,7 +22,7 @@ type NetworkReloadOptions struct { // NetworkReloadReport describes the results of reloading a container network. type NetworkReloadReport struct { - // nolint:stylecheck,golint + // nolint:stylecheck,revive Id string Err error } @@ -31,7 +33,7 @@ type NetworkRmOptions struct { Timeout *uint } -//NetworkRmReport describes the results of network removal +// NetworkRmReport describes the results of network removal type NetworkRmReport struct { Name string Err error } @@ -41,12 +43,12 @@ type NetworkRmReport struct { type NetworkCreateOptions struct { DisableDNS bool Driver string - Gateway net.IP + Gateways []net.IP Internal bool Labels map[string]string MacVLAN string - Range net.IPNet - Subnet net.IPNet + Ranges []string + Subnets []string IPv6 bool // Mapping of driver options and values. Options map[string]string @@ -67,8 +69,8 @@ type NetworkDisconnectOptions struct { // NetworkConnectOptions describes options for connecting // a container to a network type NetworkConnectOptions struct { - Aliases []string - Container string + Container string `json:"container"` + types.PerNetworkOptions } // NetworkPruneReport contains the name of the network and an error
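NetworkCreateOptions now takes plural Subnets and Ranges (as strings) and a slice of Gateways; a sketch of a dual-stack create request might look like the following (driver, subnets, and gateway addresses are illustrative values, not defaults from this change):

    opts := NetworkCreateOptions{
        Driver:   "bridge",
        Subnets:  []string{"10.89.0.0/24", "fd00:abcd::/64"},
        Gateways: []net.IP{net.ParseIP("10.89.0.1"), net.ParseIP("fd00:abcd::1")},
        IPv6:     true,
    }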
diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/play.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go similarity index 86% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/play.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go index af4b0fc350f..c9dc3f08c2d 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/play.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go @@ -8,15 +8,21 @@ import ( // PlayKubeOptions controls playing kube YAML files. type PlayKubeOptions struct { + // Annotations - Annotations to add to Pods + Annotations map[string]string // Authfile - path to an authentication file. Authfile string // Indicator to build all images with Containerfile or Dockerfile - Build bool + Build types.OptionalBool // CertDir - path to a directory containing TLS certificates and keys. CertDir string + // ContextDir - directory containing image contexts used for Build + ContextDir string // Down indicates whether to bring contents of a yaml file "down" // as in stop Down bool + // Replace indicates whether to delete and recreate a yaml file + Replace bool // Do not create /etc/hosts within the pod's containers, // instead use the version from the image NoHosts bool @@ -24,8 +30,8 @@ type PlayKubeOptions struct { Username string // Password for authenticating against the registry. Password string - // Network - name of the CNI network to connect to. - Network string + // Networks - names of the networks to connect to. + Networks []string // Quiet - suppress output when pulling images. Quiet bool // SignaturePolicy - path to a signature-policy file. @@ -44,6 +50,8 @@ type PlayKubeOptions struct { ConfigMaps []string // LogDriver for the container. For example: journald LogDriver string + // LogOptions for the container's log driver. + LogOptions []string // Start - don't start the pod if false Start types.OptionalBool } diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/pods.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go similarity index 87% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/pods.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go index 3096773965d..1e25e087264 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/pods.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go @@ -5,9 +5,10 @@ import ( "strings" "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/pkg/util" + commonFlag "github.com/containers/common/pkg/flag" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -117,7 +118,7 @@ type PodSpec struct { // The JSON tags below are made to match the respective field in ContainerCreateOptions for the purpose of mapping. // swagger:model PodCreateOptions type PodCreateOptions struct { - CGroupParent string `json:"cgroup_parent,omitempty"` + CgroupParent string `json:"cgroup_parent,omitempty"` CreateCommand []string `json:"create_command,omitempty"` Devices []string `json:"devices,omitempty"` DeviceReadBPs []string `json:"device_read_bps,omitempty"` @@ -131,12 +132,15 @@ type PodCreateOptions struct { Name string `json:"name,omitempty"` Net *NetOptions `json:"net,omitempty"` Share []string `json:"share,omitempty"` + ShareParent *bool `json:"share_parent,omitempty"` Pid string `json:"pid,omitempty"` Cpus float64 `json:"cpus,omitempty"` CpusetCpus string `json:"cpuset_cpus,omitempty"` Userns specgen.Namespace `json:"-"` Volume []string `json:"volume,omitempty"` VolumesFrom []string `json:"volumes_from,omitempty"` + SecurityOpt []string `json:"security_opt,omitempty"` + Sysctl []string `json:"sysctl,omitempty"` } // PodLogsOptions describes the options to extract pod logs. @@ -145,6 +149,8 @@ type PodLogsOptions struct { ContainerLogsOptions // If specified, will only fetch the logs of the specified container ContainerName string + // Show different colors in the logs.
+ Color bool } type ContainerCreateOptions struct { @@ -156,8 +162,8 @@ type ContainerCreateOptions struct { CapAdd []string CapDrop []string CgroupNS string - CGroupsMode string - CGroupParent string `json:"cgroup_parent,omitempty"` + CgroupsMode string + CgroupParent string `json:"cgroup_parent,omitempty"` CIDFile string ConmonPIDFile string `json:"container_conmon_pidfile,omitempty"` CPUPeriod uint64 @@ -169,7 +175,7 @@ type ContainerCreateOptions struct { CPUSetCPUs string `json:"cpuset_cpus,omitempty"` CPUSetMems string Devices []string `json:"devices,omitempty"` - DeviceCGroupRule []string + DeviceCgroupRule []string DeviceReadBPs []string `json:"device_read_bps,omitempty"` DeviceReadIOPs []string DeviceWriteBPs []string @@ -188,13 +194,13 @@ type ContainerCreateOptions struct { HealthTimeout string Hostname string `json:"hostname,omitempty"` HTTPProxy bool + HostUsers []string ImageVolume string Init bool InitContainerType string InitPath string Interactive bool IPC string - KernelMemory string Label []string LabelFile []string LogDriver string @@ -206,7 +212,7 @@ type ContainerCreateOptions struct { Name string `json:"container_name"` NoHealthCheck bool OOMKillDisable bool - OOMScoreAdj int + OOMScoreAdj *int Arch string OS string Variant string @@ -229,23 +235,25 @@ type ContainerCreateOptions struct { Rm bool RootFS bool Secrets []string - SecurityOpt []string + SecurityOpt []string `json:"security_opt,omitempty"` SdNotifyMode string ShmSize string SignaturePolicy string StopSignal string StopTimeout uint - StorageOpt []string + StorageOpts []string SubUIDName string SubGIDName string - Sysctl []string + Sysctl []string `json:"sysctl,omitempty"` Systemd string Timeout uint - TLSVerify bool + TLSVerify commonFlag.OptionalBool TmpFS []string TTY bool Timezone string Umask string + UnsetEnv []string + UnsetEnvAll bool UIDMap []string Ulimit []string User string @@ -257,11 +265,24 @@ type ContainerCreateOptions struct { Workdir string SeccompPolicy string PidFile string + ChrootDirs []string IsInfra bool + IsClone bool Net *NetOptions `json:"net,omitempty"` CgroupConf []string + + PasswdEntry string +} + +func NewInfraContainerCreateOptions() ContainerCreateOptions { + options := ContainerCreateOptions{ + IsInfra: true, + ImageVolume: "bind", + MemorySwappiness: -1, + } + return options } type PodCreateReport struct { @@ -300,6 +321,7 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod s.Hostname = p.Hostname s.Labels = p.Labels s.Devices = p.Devices + s.SecurityOpt = p.SecurityOpt s.NoInfra = !p.Infra if p.InfraCommand != nil && len(*p.InfraCommand) > 0 { s.InfraCommand = strings.Split(*p.InfraCommand, " ") @@ -309,6 +331,7 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod } s.InfraImage = p.InfraImage s.SharedNamespaces = p.Share + s.ShareParent = p.ShareParent s.PodCreateCommand = p.CreateCommand s.VolumesFrom = p.VolumesFrom @@ -316,10 +339,8 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod if p.Net != nil { s.NetNS = p.Net.Network - s.StaticIP = p.Net.StaticIP - s.StaticMAC = p.Net.StaticMAC s.PortMappings = p.Net.PublishPorts - s.CNINetworks = p.Net.CNINetworks + s.Networks = p.Net.Networks s.NetworkOptions = p.Net.NetworkOptions if p.Net.UseImageResolvConf { s.NoManageResolvConf = true @@ -332,7 +353,7 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod } // Cgroup - s.CgroupParent = p.CGroupParent + s.CgroupParent = p.CgroupParent // Resource 
config cpuDat := p.CPULimits() @@ -348,6 +369,15 @@ func ToPodSpecGen(s specgen.PodSpecGenerator, p *PodCreateOptions) (*specgen.Pod } } s.Userns = p.Userns + sysctl := map[string]string{} + if ctl := p.Sysctl; len(ctl) > 0 { + sysctl, err = util.ValidateSysctls(ctl) + if err != nil { + return nil, err + } + } + s.Sysctl = sysctl + return &s, nil } @@ -456,6 +486,7 @@ func PodLogsOptionsToContainerLogsOptions(options PodLogsOptions) ContainerLogsO Until: options.Until, Tail: options.Tail, Timestamps: options.Timestamps, + Colors: options.Colors, StdoutWriter: options.StdoutWriter, StderrWriter: options.StderrWriter, } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go new file mode 100644 index 00000000000..54bcd092baf --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go @@ -0,0 +1,28 @@ +package reports + +type RmReport struct { + Id string `json:"Id"` //nolint + Err error `json:"Err,omitempty"` +} + +func RmReportsIds(r []*RmReport) []string { + ids := make([]string, 0, len(r)) + for _, v := range r { + if v == nil || v.Id == "" { + continue + } + ids = append(ids, v.Id) + } + return ids +} + +func RmReportsErrs(r []*RmReport) []error { + errs := make([]error, 0, len(r)) + for _, v := range r { + if v == nil || v.Err == nil { + continue + } + errs = append(errs, v.Err) + } + return errs +} diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/reports/prune.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go similarity index 83% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/reports/prune.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go index 5494ac3ae5f..497e5d6069d 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/reports/prune.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go @@ -1,9 +1,9 @@ package reports type PruneReport struct { - Id string //nolint - Err error - Size uint64 + Id string `json:"Id"` //nolint + Err error `json:"Err,omitempty"` + Size uint64 `json:"Size"` } func PruneReportsIds(r []*PruneReport) []string { @@ -34,7 +34,7 @@ func PruneReportsSize(r []*PruneReport) uint64 { if v == nil { continue } - size = size + v.Size + size += v.Size } return size } diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/secrets.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go similarity index 97% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/secrets.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go index 55b470d7b86..d8af937a721 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/secrets.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go @@ -3,7 +3,7 @@ package entities import ( "time" - "github.com/containers/podman/v3/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/errorhandling" ) type SecretCreateReport struct { diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/set.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/set.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go diff --git 
a/vendor/github.com/containers/podman/v3/pkg/domain/entities/system.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go similarity index 81% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/system.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go index cca4bf44ee8..21026477d5b 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/system.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go @@ -3,18 +3,17 @@ package entities import ( "time" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/domain/entities/reports" - "github.com/containers/podman/v3/pkg/domain/entities/types" - "github.com/spf13/cobra" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v4/pkg/domain/entities/types" ) -// ServiceOptions provides the input for starting an API Service +// ServiceOptions provides the input for starting an API and sidecar pprof services type ServiceOptions struct { - URI string // Path to unix domain socket service should listen on - Timeout time.Duration // duration of inactivity the service should wait before shutting down - Command *cobra.Command // CLI command provided. Used in V1 code - CorsHeaders string // CORS headers + CorsHeaders string // Cross-Origin Resource Sharing (CORS) headers + PProfAddr string // Network address to bind pprof profiles service + Timeout time.Duration // Duration of inactivity the service should wait before shutting down + URI string // Path to unix domain socket service should listen on } // SystemPruneOptions provides options to prune system. @@ -101,7 +100,7 @@ type SystemVersionReport struct { // SystemUnshareOptions describes the options for the unshare command type SystemUnshareOptions struct { - RootlessCNI bool + RootlessNetNS bool } type ComponentVersion struct { diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go similarity index 71% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/types.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go index ec4d4a90284..bed3183e9f4 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go @@ -4,10 +4,10 @@ import ( "net" buildahDefine "github.com/containers/buildah/define" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/libpod/events" - "github.com/containers/podman/v3/libpod/network/types" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v4/pkg/specgen" "github.com/containers/storage/pkg/archive" ) @@ -20,7 +20,7 @@ type Volume struct { } type Report struct { - Id []string //nolint + Id []string // nolint Err map[string]error } @@ -29,8 +29,6 @@ type PodDeleteReport struct{ Report } type VolumeDeleteOptions struct{} type VolumeDeleteReport struct{ Report } -// NetOptions reflect the shared network options between -// pods and containers type NetFlags struct { AddHosts []string `json:"add-host,omitempty"` DNS []string `json:"dns,omitempty"` @@ -43,19 +41,20 @@ type NetFlags struct { Network string 
`json:"network,omitempty"` NetworkAlias []string `json:"network-alias,omitempty"` } + +// NetOptions reflect the shared network options between +// pods and containers type NetOptions struct { - AddHosts []string `json:"hostadd,omitempty"` - Aliases []string `json:"network_alias,omitempty"` - CNINetworks []string `json:"cni_networks,omitempty"` - UseImageResolvConf bool `json:"no_manage_resolv_conf,omitempty"` - DNSOptions []string `json:"dns_option,omitempty"` - DNSSearch []string `json:"dns_search,omitempty"` - DNSServers []net.IP `json:"dns_server,omitempty"` - Network specgen.Namespace `json:"netns,omitempty"` - NoHosts bool `json:"no_manage_hosts,omitempty"` - PublishPorts []types.PortMapping `json:"portmappings,omitempty"` - StaticIP *net.IP `json:"static_ip,omitempty"` - StaticMAC *net.HardwareAddr `json:"static_mac,omitempty"` + AddHosts []string `json:"hostadd,omitempty"` + Aliases []string `json:"network_alias,omitempty"` + Networks map[string]types.PerNetworkOptions `json:"networks,omitempty"` + UseImageResolvConf bool `json:"no_manage_resolv_conf,omitempty"` + DNSOptions []string `json:"dns_option,omitempty"` + DNSSearch []string `json:"dns_search,omitempty"` + DNSServers []net.IP `json:"dns_server,omitempty"` + Network specgen.Namespace `json:"netns,omitempty"` + NoHosts bool `json:"no_manage_hosts,omitempty"` + PublishPorts []types.PortMapping `json:"portmappings,omitempty"` // NetworkOptions are additional options for each network NetworkOptions map[string][]string `json:"network_options,omitempty"` } @@ -99,8 +98,10 @@ type EventsOptions struct { // ContainerCreateResponse is the response struct for creating a container type ContainerCreateResponse struct { // ID of the container created + // required: true ID string `json:"Id"` // Warnings during container creation + // required: true Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/types/auth.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/types/auth.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/types/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/types/types.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go similarity index 89% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/volumes.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go index 2ecfb4446fc..f2e60a0db28 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go @@ -3,7 +3,7 @@ package entities import ( "net/url" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" docker_api_types "github.com/docker/docker/api/types" docker_api_types_volume "github.com/docker/docker/api/types/volume" ) @@ -78,8 +78,10 @@ type VolumeCreateOptions struct { Name string `schema:"name"` // Volume driver to use Driver string `schema:"driver"` - // User-defined key/value 
diff --git a/vendor/github.com/containers/podman/v3/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go similarity index 89% rename from vendor/github.com/containers/podman/v3/pkg/domain/entities/volumes.go rename to vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go index 2ecfb4446fc..f2e60a0db28 100644 --- a/vendor/github.com/containers/podman/v3/pkg/domain/entities/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go @@ -3,7 +3,7 @@ package entities import ( "net/url" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" docker_api_types "github.com/docker/docker/api/types" docker_api_types_volume "github.com/docker/docker/api/types/volume" ) @@ -78,8 +78,10 @@ type VolumeCreateOptions struct { Name string `schema:"name"` // Volume driver to use Driver string `schema:"driver"` - // User-defined key/value metadata. + // User-defined key/value metadata. Provided for compatibility Label map[string]string `schema:"label"` + // User-defined key/value metadata. Preferred field, will override Label + Labels map[string]string `schema:"labels"` // Mapping of driver options and values. Options map[string]string `schema:"opts"` } @@ -185,3 +187,17 @@ type VolumeCreateBody struct { // Required: true Name string `json:"Name"` } + +// VolumeMountReport describes the response from mounting a volume +type VolumeMountReport struct { + Err error + Id string //nolint + Name string + Path string +} + +// VolumeUnmountReport describes the response from unmounting a volume +type VolumeUnmountReport struct { + Err error + Id string //nolint +} diff --git a/vendor/github.com/containers/podman/v3/pkg/env/env.go b/vendor/github.com/containers/podman/v4/pkg/env/env.go similarity index 98% rename from vendor/github.com/containers/podman/v3/pkg/env/env.go rename to vendor/github.com/containers/podman/v4/pkg/env/env.go index ecd2d62a512..5989d0da534 100644 --- a/vendor/github.com/containers/podman/v3/pkg/env/env.go +++ b/vendor/github.com/containers/podman/v4/pkg/env/env.go @@ -26,7 +26,7 @@ func DefaultEnvVariables() map[string]string { // Slice transforms the specified map of environment variables into a // slice. If a value is non-empty, the key and value are joined with '='. func Slice(m map[string]string) []string { - env := make([]string, len(m)) + env := make([]string, 0, len(m)) for k, v := range m { var s string if len(v) > 0 { diff --git a/vendor/github.com/containers/podman/v3/pkg/env/env_supported.go b/vendor/github.com/containers/podman/v4/pkg/env/env_unix.go similarity index 88% rename from vendor/github.com/containers/podman/v3/pkg/env/env_supported.go rename to vendor/github.com/containers/podman/v4/pkg/env/env_unix.go index 8be9f9592b9..690078f33c4 100644 --- a/vendor/github.com/containers/podman/v3/pkg/env/env_supported.go +++ b/vendor/github.com/containers/podman/v4/pkg/env/env_unix.go @@ -1,4 +1,5 @@ -// +build linux darwin +//go:build !windows +// +build !windows package env diff --git a/vendor/github.com/containers/podman/v4/pkg/env/env_windows.go b/vendor/github.com/containers/podman/v4/pkg/env/env_windows.go new file mode 100644 index 00000000000..5df08fadcd9 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/env/env_windows.go @@ -0,0 +1,25 @@ +package env + +// ParseSlice parses the specified slice and transforms it into an environment +// map. +func ParseSlice(s []string) (map[string]string, error) { + env := make(map[string]string, len(s)) + for _, e := range s { + if len(e) > 0 && e[0] == '=' { + // The legacy Windows CMD command interpreter uses a hack, where to emulate + // DOS semantics, it uses an illegal (by Windows definition) env name for + // state storage to avoid conflicting with user-defined env names. This is + // used to preserve drive letter paths. E.g., typing c: from another drive + // will remember the last CWD because CMD stores it in an env named "=C:". + // Since these are illegal, they are filtered from standard user access but + // are still available in the underlying win32 API calls. Since they have + // zero value to a container, we filter as well.
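+ // For example, an entry such as "=C:=C:\Some\Dir" (an illustrative value, not one produced by this code) would hit this branch and be dropped.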
+ continue + } + + if err := parseEnv(env, e); err != nil { + return nil, err + } + } + return env, nil +} diff --git a/vendor/github.com/containers/podman/v3/pkg/errorhandling/errorhandling.go b/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go similarity index 90% rename from vendor/github.com/containers/podman/v3/pkg/errorhandling/errorhandling.go rename to vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go index 44a0c3efd3d..e33c260321b 100644 --- a/vendor/github.com/containers/podman/v3/pkg/errorhandling/errorhandling.go +++ b/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go @@ -28,7 +28,7 @@ func JoinErrors(errs []error) error { finalErr := multiE.ErrorOrNil() if finalErr == nil { - return finalErr + return nil } return errors.New(strings.TrimSpace(finalErr.Error())) } @@ -83,6 +83,12 @@ func Contains(err error, sub error) bool { return strings.Contains(err.Error(), sub.Error()) } +// PodConflictErrorModel is used in remote connections with podman +type PodConflictErrorModel struct { + Errs []string + Id string //nolint +} + // ErrorModel is used in remote connections with podman type ErrorModel struct { // API root cause formatted for automated parsing @@ -106,3 +112,11 @@ func (e ErrorModel) Cause() error { func (e ErrorModel) Code() int { return e.ResponseCode } + +func (e PodConflictErrorModel) Error() string { + return strings.Join(e.Errs, ",") +} + +func (e PodConflictErrorModel) Code() int { + return 409 +} diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/0.1.0/hook.go b/vendor/github.com/containers/podman/v4/pkg/hooks/0.1.0/hook.go similarity index 97% rename from vendor/github.com/containers/podman/v3/pkg/hooks/0.1.0/hook.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/0.1.0/hook.go index fb576017213..c3df5fa1df8 100644 --- a/vendor/github.com/containers/podman/v3/pkg/hooks/0.1.0/hook.go +++ b/vendor/github.com/containers/podman/v4/pkg/hooks/0.1.0/hook.go @@ -6,7 +6,7 @@ import ( "errors" "strings" - current "github.com/containers/podman/v3/pkg/hooks/1.0.0" + current "github.com/containers/podman/v4/pkg/hooks/1.0.0" rspec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/1.0.0/hook.go b/vendor/github.com/containers/podman/v4/pkg/hooks/1.0.0/hook.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/hooks/1.0.0/hook.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/1.0.0/hook.go diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/1.0.0/when.go b/vendor/github.com/containers/podman/v4/pkg/hooks/1.0.0/when.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/hooks/1.0.0/when.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/1.0.0/when.go diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/README.md b/vendor/github.com/containers/podman/v4/pkg/hooks/README.md similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/hooks/README.md rename to vendor/github.com/containers/podman/v4/pkg/hooks/README.md diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/exec/exec.go b/vendor/github.com/containers/podman/v4/pkg/hooks/exec/exec.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/hooks/exec/exec.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/exec/exec.go diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/exec/runtimeconfigfilter.go 
b/vendor/github.com/containers/podman/v4/pkg/hooks/exec/runtimeconfigfilter.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/hooks/exec/runtimeconfigfilter.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/exec/runtimeconfigfilter.go diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/hooks.go b/vendor/github.com/containers/podman/v4/pkg/hooks/hooks.go similarity index 98% rename from vendor/github.com/containers/podman/v3/pkg/hooks/hooks.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/hooks.go index f49195f658b..ecd5b7dfc15 100644 --- a/vendor/github.com/containers/podman/v3/pkg/hooks/hooks.go +++ b/vendor/github.com/containers/podman/v4/pkg/hooks/hooks.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - current "github.com/containers/podman/v3/pkg/hooks/1.0.0" + current "github.com/containers/podman/v4/pkg/hooks/1.0.0" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/monitor.go b/vendor/github.com/containers/podman/v4/pkg/hooks/monitor.go similarity index 96% rename from vendor/github.com/containers/podman/v3/pkg/hooks/monitor.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/monitor.go index ece6e52d17b..d2d7140a569 100644 --- a/vendor/github.com/containers/podman/v3/pkg/hooks/monitor.go +++ b/vendor/github.com/containers/podman/v4/pkg/hooks/monitor.go @@ -3,7 +3,7 @@ package hooks import ( "context" - current "github.com/containers/podman/v3/pkg/hooks/1.0.0" + current "github.com/containers/podman/v4/pkg/hooks/1.0.0" "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/read.go b/vendor/github.com/containers/podman/v4/pkg/hooks/read.go similarity index 95% rename from vendor/github.com/containers/podman/v3/pkg/hooks/read.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/read.go index e7e5aa66f32..c48d51f7dec 100644 --- a/vendor/github.com/containers/podman/v3/pkg/hooks/read.go +++ b/vendor/github.com/containers/podman/v4/pkg/hooks/read.go @@ -8,8 +8,8 @@ import ( "path/filepath" "strings" - old "github.com/containers/podman/v3/pkg/hooks/0.1.0" - current "github.com/containers/podman/v3/pkg/hooks/1.0.0" + old "github.com/containers/podman/v4/pkg/hooks/0.1.0" + current "github.com/containers/podman/v4/pkg/hooks/1.0.0" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/hooks/version.go b/vendor/github.com/containers/podman/v4/pkg/hooks/version.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/hooks/version.go rename to vendor/github.com/containers/podman/v4/pkg/hooks/version.go diff --git a/vendor/github.com/containers/podman/v3/pkg/inspect/inspect.go b/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go similarity index 92% rename from vendor/github.com/containers/podman/v3/pkg/inspect/inspect.go rename to vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go index 5caeae1f09c..767d86daf59 100644 --- a/vendor/github.com/containers/podman/v3/pkg/inspect/inspect.go +++ b/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go @@ -4,12 +4,12 @@ import ( "time" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v3/libpod/define" + "github.com/containers/podman/v4/libpod/define" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -// ImageData 
holds the inspect information of an image +// ImageData holds the inspect information of an image. type ImageData struct { ID string `json:"Id"` Digest digest.Digest `json:"Digest"` @@ -36,13 +36,13 @@ type ImageData struct { HealthCheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` } -// RootFS holds the root fs information of an image +// RootFS holds the root fs information of an image. type RootFS struct { Type string `json:"Type"` Layers []digest.Digest `json:"Layers"` } -// ImageResult is used for podman images for collection and output +// ImageResult is used for podman images for collection and output. type ImageResult struct { Tag string Repository string diff --git a/vendor/github.com/rootless-containers/rootlesskit/LICENSE b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/LICENSE similarity index 100% rename from vendor/github.com/rootless-containers/rootlesskit/LICENSE rename to vendor/github.com/containers/podman/v4/pkg/k8s.io/api/LICENSE diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/annotation_key_constants.go new file mode 100644 index 00000000000..7fde09126c6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/annotation_key_constants.go @@ -0,0 +1,151 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/annotation_key_constants.go. + +package v1 + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + // Deprecated: set a pod security context `seccompProfile` field. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + // Deprecated: set a container security context `seccompProfile` field. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" +
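+ // For illustration (the pairing below is an example, not something set by this file): a pod opting into the runtime's default seccomp profile would carry the annotation "seccomp.security.alpha.kubernetes.io/pod": "runtime/default".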
+ // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. + // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. + SeccompProfileRuntimeDefault string = "runtime/default" + + // SeccompProfileNameUnconfined is the unconfined seccomp profile. + SeccompProfileNameUnconfined string = "unconfined" + + // SeccompLocalhostProfileNamePrefix is the prefix for specifying profiles loaded from the node's disk. + SeccompLocalhostProfileNamePrefix = "localhost/" + + // AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. + AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" + // AppArmorBetaDefaultProfileAnnotationKey is the annotation key specifying the default AppArmor profile. + AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" + // AppArmorBetaAllowedProfilesAnnotationKey is the annotation key specifying the allowed AppArmor profiles. + AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames" + + // AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. + AppArmorBetaProfileRuntimeDefault = "runtime/default" + + // AppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node. + AppArmorBetaProfileNamePrefix = "localhost/" + + // AppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile + AppArmorBetaProfileNameUnconfined = "unconfined" + + // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. + // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. + DeprecatedSeccompProfileDockerDefault string = "docker/default" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change.
+ // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). + // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // EndpointsOverCapacity will be set on an Endpoints resource when it + // exceeds the maximum capacity of 1000 addresses. Initially the Endpoints + // controller will set this annotation with a value of "warning". In a + // future release, the controller may set this annotation with a value of + // "truncated" to indicate that any addresses exceeding the limit of 1000 + // have been truncated from the Endpoints resource. + EndpointsOverCapacity = "endpoints.kubernetes.io/over-capacity" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. + // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" + + // PodDeletionCost can be set to an int32 that represents the cost of deleting + // a pod compared to other pods belonging to the same ReplicaSet. Pods with lower + // deletion cost are preferred to be deleted before pods with higher deletion cost. + // Note that this is honored on a best-effort basis, and so it does not offer guarantees on + // pod deletion order. + // The implicit deletion cost for pods that don't set the annotation is 0, negative values are permitted. + // + // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. + PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" + + // AnnotationTopologyAwareHints can be used to enable or disable Topology + // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any + // other value is treated as "Disabled". + AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" +) diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/resource.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/resource.go new file mode 100644 index 00000000000..2fbb663c7a6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/resource.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" +) + +// Returns string version of ResourceName. +func (rn ResourceName) String() string { + return string(rn) +} + +// Cpu returns the Cpu limit if specified. +//nolint:revive,stylecheck +func (rl *ResourceList) Cpu() *resource.Quantity { + return rl.Name(ResourceCPU, resource.DecimalSI) +} + +// Memory returns the Memory limit if specified. +func (rl *ResourceList) Memory() *resource.Quantity { + return rl.Name(ResourceMemory, resource.BinarySI) +} + +// Storage returns the Storage limit if specified. +func (rl *ResourceList) Storage() *resource.Quantity { + return rl.Name(ResourceStorage, resource.BinarySI) +} + +// Pods returns the list of pods +func (rl *ResourceList) Pods() *resource.Quantity { + return rl.Name(ResourcePods, resource.DecimalSI) +} + +// StorageEphemeral returns the list of ephemeral storage volumes, if any +func (rl *ResourceList) StorageEphemeral() *resource.Quantity { + return rl.Name(ResourceEphemeralStorage, resource.BinarySI) +} + +// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format. +func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity { + if val, ok := (*rl)[name]; ok { + return &val + } + return &resource.Quantity{Format: defaultFormat} +} diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go new file mode 100644 index 00000000000..48f353cc63c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go @@ -0,0 +1,4468 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" + metav1 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types" + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr" +) + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +type Volume struct { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name"` + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. 
+ // This implied behavior is deprecated and will be removed in a future version. + VolumeSource `json:",inline"` +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + ClaimName string `json:"claimName"` + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + ReadOnly bool `json:"readOnly,omitempty"` +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +type PersistentVolumeSource struct { + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes +type PersistentVolume struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + Spec PersistentVolumeSpec `json:"spec,omitempty"` + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + Status PersistentVolumeStatus `json:"status,omitempty"` +} + +// PersistentVolumeSpec is the specification of a persistent volume. +type PersistentVolumeSpec struct { + // A description of the persistent volume's resources and capacity. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty"` + // The actual volume backing the persistent volume. + PersistentVolumeSource `json:",inline"` + // AccessModes contains all ways the volume can be mounted. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding + // +optional + ClaimRef *ObjectReference `json:"claimRef,omitempty"` + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default for manually created PersistentVolumes), Delete (default + // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). + // Recycle must be supported by the volume plugin underlying this PersistentVolume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string `json:"storageClassName,omitempty"` + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options + // +optional + MountOptions []string `json:"mountOptions,omitempty"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty"` + // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // This field influences the scheduling of pods that use this volume. + // +optional + NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty"` +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +type VolumeNodeAffinity struct { + // Required specifies hard node constraints that must be met. + Required *NodeSelector `json:"required,omitempty"` +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. 
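+// A consumer would compare a volume's spec against the constants below, e.g.
+// pv.Spec.PersistentVolumeReclaimPolicy == PersistentVolumeReclaimDelete (the
+// variable pv here is illustrative, not part of this file).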
+type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + +// PersistentVolumeStatus is the current status of a persistent volume. +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase + // +optional + Phase PersistentVolumePhase `json:"phase,omitempty"` + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string `json:"message,omitempty"` + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + Reason string `json:"reason,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeList is a list of PersistentVolume items. +type PersistentVolumeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // List of persistent volumes. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + Items []PersistentVolume `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec PersistentVolumeClaimSpec `json:"spec,omitempty"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Status PersistentVolumeClaimStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +type PersistentVolumeClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // A list of persistent volume claims. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + Items []PersistentVolumeClaim `json:"items"` +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // AccessModes contains the desired access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // A label query over volumes to consider for binding. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` + // Resources represents the minimum resources the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty"` + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + VolumeName string `json:"volumeName,omitempty"` + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty"` + // This field can be used to specify either: + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + // * An existing PVC (PersistentVolumeClaim) + // If the provisioner or an external controller can support the specified data source, + // it will create a new volume based on the contents of the specified data source. + // If the AnyVolumeDataSource feature gate is enabled, this field will always have + // the same contents as the DataSourceRef field. + // +optional + DataSource *TypedLocalObjectReference `json:"dataSource,omitempty"` + // Specifies the object from which to populate the volume with data, if a non-empty + // volume is desired. This may be any local object from a non-empty API group (non + // core object) or a PersistentVolumeClaim object. + // When this field is specified, volume binding will only succeed if the type of + // the specified object matches some installed volume populator or dynamic + // provisioner. + // This field will replace the functionality of the DataSource field and as such + // if both fields are non-empty, they must have the same value. For backwards + // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // value automatically if one of them is empty and the other is non-empty. 
+ // There are two important differences between DataSource and DataSourceRef:
+ // * While DataSource only allows two specific types of objects, DataSourceRef
+ //   allows any non-core object, as well as PersistentVolumeClaim objects.
+ // * While DataSource ignores disallowed values (dropping them), DataSourceRef
+ //   preserves all values, and generates an error if a disallowed value is
+ //   specified.
+ // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ // +optional
+ DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty"`
+}
+
+// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+type PersistentVolumeClaimConditionType string
+
+const (
+ // PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started
+ PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+ // PersistentVolumeClaimFileSystemResizePending - the controller resize is finished and a file system resize is pending on the node
+ PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+)
+
+// PersistentVolumeClaimCondition contains details about the state of a PVC.
+type PersistentVolumeClaimCondition struct {
+ Type PersistentVolumeClaimConditionType `json:"type"`
+ Status ConditionStatus `json:"status"`
+ // Last time we probed the condition.
+ // +optional
+ LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ // Unique, this should be a short, machine-understandable string that gives the reason
+ // for the condition's last transition. If it reports "ResizeStarted" that means the underlying
+ // persistent volume is being resized.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // Human-readable message indicating details about the last transition.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+type PersistentVolumeClaimStatus struct {
+ // Phase represents the current phase of PersistentVolumeClaim.
+ // +optional
+ Phase PersistentVolumeClaimPhase `json:"phase,omitempty"`
+ // AccessModes contains the actual access modes the volume backing the PVC has.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ // +optional
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // Represents the actual resources of the underlying volume.
+ // +optional
+ Capacity ResourceList `json:"capacity,omitempty"`
+ // Current Condition of the persistent volume claim. If the underlying persistent volume is being
+ // resized then the Condition will be set to 'ResizeStarted'.
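+ //
+ // An illustrative sketch (editor's example, not part of the upstream
+ // sources): a consumer could detect an in-progress resize like this,
+ // assuming `pvc` is a populated PersistentVolumeClaim:
+ //
+ //     resizing := false
+ //     for _, c := range pvc.Status.Conditions {
+ //             if c.Type == PersistentVolumeClaimResizing && c.Status == ConditionTrue {
+ //                     resizing = true
+ //             }
+ //     }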
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+ // can be mounted in read/write mode to exactly 1 host
+ ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+ // can be mounted in read-only mode to many hosts
+ ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+ // can be mounted in read/write mode to many hosts
+ ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+ // can be mounted in read/write mode to exactly 1 pod
+ // cannot be used in combination with other access modes
+ ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod"
+)
+
+type PersistentVolumePhase string
+
+const (
+ // used for PersistentVolumes that are not available
+ VolumePending PersistentVolumePhase = "Pending"
+ // used for PersistentVolumes that are not yet bound
+ // Available volumes are held by the binder and matched to PersistentVolumeClaims
+ VolumeAvailable PersistentVolumePhase = "Available"
+ // used for PersistentVolumes that are bound
+ VolumeBound PersistentVolumePhase = "Bound"
+ // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+ // released volumes must be recycled before becoming available again
+ // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+ VolumeReleased PersistentVolumePhase = "Released"
+ // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+ VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+ // used for PersistentVolumeClaims that are not yet bound
+ ClaimPending PersistentVolumeClaimPhase = "Pending"
+ // used for PersistentVolumeClaims that are bound
+ ClaimBound PersistentVolumeClaimPhase = "Bound"
+ // used for PersistentVolumeClaims that lost their underlying
+ // PersistentVolume. The claim was bound to a PersistentVolume and this
+ // volume does not exist any longer and all data on it was lost.
+ ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+type HostPathType string
+
+const (
+ // For backwards compatibility, leave it empty if unset
+ HostPathUnset HostPathType = ""
+ // If nothing exists at the given path, an empty directory will be created there
+ // as needed with file mode 0755, with the same group and ownership as the Kubelet.
+ HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
+ // A directory must exist at the given path
+ HostPathDirectory HostPathType = "Directory"
+ // If nothing exists at the given path, an empty file will be created there
+ // as needed with file mode 0644, with the same group and ownership as the Kubelet.
+ HostPathFileOrCreate HostPathType = "FileOrCreate"
+ // A file must exist at the given path
+ HostPathFile HostPathType = "File"
+ // A UNIX socket must exist at the given path
+ HostPathSocket HostPathType = "Socket"
+ // A character device must exist at the given path
+ HostPathCharDev HostPathType = "CharDevice"
+ // A block device must exist at the given path
+ HostPathBlockDev HostPathType = "BlockDevice"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+ // Path of the directory on the host.
+ // If the path is a symlink, it will follow the link to the real path.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ Path string `json:"path"`
+ // Type for HostPath Volume
+ // Defaults to ""
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ // +optional
+ Type *HostPathType `json:"type,omitempty"`
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+type EmptyDirVolumeSource struct {
+ // What type of storage medium should back this directory.
+ // The default is "" which means to use the node's default medium.
+ // Must be an empty string (default) or Memory.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ // +optional
+ Medium StorageMedium `json:"medium,omitempty"`
+ // Total amount of local storage required for this EmptyDir volume.
+ // The size limit is also applicable for memory medium.
+ // The maximum usage on memory medium EmptyDir would be the minimum value between
+ // the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ // The default is nil which means that the limit is undefined.
+ // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
+ // +optional
+ SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
+// in any namespace
+// +structType=atomic
+type SecretReference struct {
+ // Name is unique within a namespace to reference a secret resource.
+ // +optional
+ Name string `json:"name,omitempty"`
+ // Namespace defines the space within which the secret name must be unique.
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+}
+
+// StorageMedium defines ways that storage can be allocated to a volume.
+type StorageMedium string
+
+const (
+ StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this
+ StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux)
+ StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
+ StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages-
+)
+
+// Protocol defines network protocols supported for things like container ports.
+type Protocol string
+
+const (
+ // ProtocolTCP is the TCP protocol.
+ ProtocolTCP Protocol = "TCP"
+ // ProtocolUDP is the UDP protocol.
+ ProtocolUDP Protocol = "UDP"
+ // ProtocolSCTP is the SCTP protocol.
+ ProtocolSCTP Protocol = "SCTP"
+)
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+ // Name of the secret in the pod's namespace to use.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ // +optional
+ SecretName string `json:"secretName,omitempty"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath `json:"items,omitempty"`
+ // Optional: mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values
+ // for mode bits. Defaults to 0644.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
+ // Specify whether the Secret or its keys must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty"`
+}
+
+const (
+ SecretVolumeSourceDefaultMode int32 = 0644
+)
+
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+type SecretProjection struct {
+ LocalObjectReference `json:",inline"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath `json:"items,omitempty"`
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty"`
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+ LocalObjectReference `json:",inline"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath `json:"items,omitempty"`
+ // Optional: mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // Defaults to 0644.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
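+ //
+ // A hedged sketch (editor's example): in Go source the mode is naturally
+ // written in octal, while a JSON manifest must carry the decimal form:
+ //
+ //     mode := int32(0640) // octal 0640 == decimal 416
+ //     src := ConfigMapVolumeSource{
+ //             LocalObjectReference: LocalObjectReference{Name: "app-config"},
+ //             DefaultMode:          &mode,
+ //     }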
+ // +optional
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty"`
+}
+
+const (
+ ConfigMapVolumeSourceDefaultMode int32 = 0644
+)
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+ LocalObjectReference `json:",inline"`
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath `json:"items,omitempty"`
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty"`
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+ // Audience is the intended audience of the token. A recipient of a token
+ // must identify itself with an identifier specified in the audience of the
+ // token, and otherwise should reject the token. The audience defaults to the
+ // identifier of the apiserver.
+ //+optional
+ Audience string `json:"audience,omitempty"`
+ // ExpirationSeconds is the requested duration of validity of the service
+ // account token. As the token approaches expiration, the kubelet volume
+ // plugin will proactively rotate the service account token. The kubelet will
+ // start trying to rotate the token if the token is older than 80 percent of
+ // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ // and must be at least 10 minutes.
+ //+optional
+ ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
+ // Path is the path relative to the mount point of the file to project the
+ // token into.
+ Path string `json:"path"`
+}
+
+// Represents a projected volume source
+type ProjectedVolumeSource struct {
+ // list of volume projections
+ // +optional
+ Sources []VolumeProjection `json:"sources"`
+ // Mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
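+ //
+ // A minimal sketch (editor's illustration; values are examples only) of a
+ // projected volume carrying a bound service account token:
+ //
+ //     exp := int64(3600)
+ //     src := ProjectedVolumeSource{
+ //             Sources: []VolumeProjection{{
+ //                     ServiceAccountToken: &ServiceAccountTokenProjection{
+ //                             Audience:          "https://example.invalid",
+ //                             ExpirationSeconds: &exp,
+ //                             Path:              "token",
+ //                     },
+ //             }},
+ //     }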
+ // +optional
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
+}
+
+// Projection that may be projected along with other supported volume types
+type VolumeProjection struct {
+ // all types below are the supported types for projection into the same volume
+
+ // information about the secret data to project
+ // +optional
+ Secret *SecretProjection `json:"secret,omitempty"`
+ // information about the downwardAPI data to project
+ // +optional
+ DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty"`
+ // information about the configMap data to project
+ // +optional
+ ConfigMap *ConfigMapProjection `json:"configMap,omitempty"`
+ // information about the serviceAccountToken data to project
+ // +optional
+ ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty"`
+}
+
+const (
+ ProjectedVolumeSourceDefaultMode int32 = 0644
+)
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+ // The key to project.
+ Key string `json:"key"`
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ Path string `json:"path"`
+ // Optional: mode bits used to set permissions on this file.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32 `json:"mode,omitempty"`
+}
+
+// PersistentVolumeClaimTemplate is used to produce
+// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
+type PersistentVolumeClaimTemplate struct {
+ // May contain labels and annotations that will be copied into the PVC
+ // when creating it. No other fields are allowed; they will be rejected during
+ // validation.
+ //
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // The specification for the PersistentVolumeClaim. The entire content is
+ // copied unchanged into the PVC that gets created from this
+ // template. The same fields as in a PersistentVolumeClaim
+ // are also valid here.
+ Spec PersistentVolumeClaimSpec `json:"spec"`
+}
+
+// ContainerPort represents a network port in a single container.
+type ContainerPort struct {
+ // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ // named port in a pod must have a unique name. Name for the port that can be
+ // referred to by services.
+ // +optional
+ Name string `json:"name,omitempty"`
+ // Number of the port to expose on the host.
+ // If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // Most containers do not need this.
+ // +optional
+ HostPort int32 `json:"hostPort,omitempty"`
+ // Number of the port to expose on the pod's IP address.
+ // This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32 `json:"containerPort"`
+ // Protocol for port. Must be UDP, TCP, or SCTP.
+ // Defaults to "TCP".
+ // +optional
+ // +default="TCP"
+ Protocol Protocol `json:"protocol,omitempty"`
+ // What host IP to bind the external port to.
+ // +optional
+ HostIP string `json:"hostIP,omitempty"`
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+ // This must match the Name of a Volume.
+ Name string `json:"name"`
+ // Mounted read-only if true, read-write otherwise (false or unspecified).
+ // Defaults to false.
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty"`
+ // Path within the container at which the volume should be mounted. Must
+ // not contain ':'.
+ MountPath string `json:"mountPath"`
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ // +optional
+ SubPath string `json:"subPath,omitempty"`
+ // mountPropagation determines how mounts are propagated from the host
+ // to the container and the other way around.
+ // When not set, MountPropagationNone is used.
+ // This field is beta in 1.10.
+ // +optional
+ MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty"`
+ // Expanded path within the volume from which the container's volume should be mounted.
+ // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ // Defaults to "" (volume's root).
+ // SubPathExpr and SubPath are mutually exclusive.
+ // +optional
+ SubPathExpr string `json:"subPathExpr,omitempty"`
+}
+
+// MountPropagationMode describes mount propagation.
+type MountPropagationMode string
+
+const (
+ // MountPropagationNone means that the volume in a container will
+ // not receive new mounts from the host or other containers, and filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode corresponds to "private" in Linux terminology.
+ MountPropagationNone MountPropagationMode = "None"
+ // MountPropagationHostToContainer means that the volume in a container will
+ // receive new mounts from the host or other containers, but filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rslave" in Linux terminology).
+ MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+ // MountPropagationBidirectional means that the volume in a container will
+ // receive new mounts from the host or other containers, and its own mounts
+ // will be propagated from the container to the host or other containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rshared" in Linux terminology).
+ MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
+
+// volumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+ // name must match the name of a persistentVolumeClaim in the pod
+ Name string `json:"name"`
+ // devicePath is the path inside of the container that the device will be mapped to.
+ DevicePath string `json:"devicePath"`
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+ // Name of the environment variable. Must be a C_IDENTIFIER.
+ Name string `json:"name"`
+
+ // Optional: no more than one of the following may be specified.
+
+ // Variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ // Escaped references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // Defaults to "".
+ // +optional
+ Value string `json:"value,omitempty"`
+ // Source for the environment variable's value. Cannot be used if value is not empty.
+ // +optional
+ ValueFrom *EnvVarSource `json:"valueFrom,omitempty"`
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+type EnvVarSource struct {
+ // Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ // +optional
+ FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"`
+ // Selects a resource of the container: only resource limits and requests
+ // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"`
+ // Selects a key of a ConfigMap.
+ // +optional
+ ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty"`
+ // Selects a key of a secret in the pod's namespace
+ // +optional
+ SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty"`
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+// +structType=atomic
+type ObjectFieldSelector struct {
+ // Version of the schema the FieldPath is written in terms of, defaults to "v1".
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty"`
+ // Path of the field to select in the specified API version.
+ FieldPath string `json:"fieldPath"`
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+// +structType=atomic
+type ResourceFieldSelector struct {
+ // Container name: required for volumes, optional for env vars
+ // +optional
+ ContainerName string `json:"containerName,omitempty"`
+ // Required: resource to select
+ Resource string `json:"resource"`
+ // Specifies the output format of the exposed resources, defaults to "1"
+ // +optional
+ Divisor resource.Quantity `json:"divisor,omitempty"`
+}
+
+// Selects a key from a ConfigMap.
+// +structType=atomic
+type ConfigMapKeySelector struct {
+ // The ConfigMap to select from.
+ LocalObjectReference `json:",inline"`
+ // The key to select.
+ Key string `json:"key"`
+ // Specify whether the ConfigMap or its key must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty"`
+}
+
+// SecretKeySelector selects a key of a Secret.
+// +structType=atomic
+type SecretKeySelector struct {
+ // The name of the secret in the pod's namespace to select from.
+ LocalObjectReference `json:",inline"`
+ // The key of the secret to select from. Must be a valid secret key.
+ Key string `json:"key"`
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool `json:"optional,omitempty"`
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets
+type EnvFromSource struct {
+ // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ // +optional + Prefix string `json:"prefix,omitempty"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference `json:",inline"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty"` +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string `json:"name"` + // The header field value + Value string `json:"value"` +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Path to access on the HTTP server. + // +optional + Path string `json:"path,omitempty"` + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port"` + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + Host string `json:"host,omitempty"` + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + Scheme URIScheme `json:"scheme,omitempty"` + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty"` +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port"` + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host string `json:"host,omitempty"` +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
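+ //
+ // For example (editor's sketch): to get shell behavior, invoke the shell
+ // explicitly:
+ //
+ //     a := ExecAction{Command: []string{"/bin/sh", "-c", "test -f /tmp/healthy"}}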
+ // +optional
+ Command []string `json:"command,omitempty"`
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+ // The action taken to determine the health of a container
+ Handler `json:",inline"`
+ // Number of seconds after the container has started before liveness probes are initiated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"`
+ // Number of seconds after which the probe times out.
+ // Defaults to 1 second. Minimum value is 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"`
+ // How often (in seconds) to perform the probe.
+ // Defaults to 10 seconds. Minimum value is 1.
+ // +optional
+ PeriodSeconds int32 `json:"periodSeconds,omitempty"`
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ // +optional
+ SuccessThreshold int32 `json:"successThreshold,omitempty"`
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // Defaults to 3. Minimum value is 1.
+ // +optional
+ FailureThreshold int32 `json:"failureThreshold,omitempty"`
+ // Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ // The grace period is the duration in seconds between the time the processes running in the
+ // pod are sent a termination signal and the time when the processes are forcibly halted with
+ // a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ // value overrides the value provided by the pod spec.
+ // Value must be a non-negative integer. The value zero indicates stop immediately via
+ // the kill signal (no opportunity to shut down).
+ // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ // +optional
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+}
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+ // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+ PullAlways PullPolicy = "Always"
+ // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
+ PullNever PullPolicy = "Never"
+ // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+ PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// PreemptionPolicy describes a policy for if/when to preempt a pod.
+type PreemptionPolicy string
+
+const (
+ // PreemptLowerPriority means that pod can preempt other pods with lower priority.
+ PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority"
+ // PreemptNever means that pod never preempts other pods with lower priority.
+ PreemptNever PreemptionPolicy = "Never"
+)
+
+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+type TerminationMessagePolicy string
+
+const (
+ // TerminationMessageReadFile is the default behavior and will set the container status message to
+ // the contents of the container's terminationMessagePath when the container exits.
+ TerminationMessageReadFile TerminationMessagePolicy = "File"
+ // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+ // for the container status message when the container exits with an error and the
+ // terminationMessagePath has no contents.
+ TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represents a POSIX capability type.
+type Capability string
+
+// Adds and removes POSIX capabilities from running containers.
+type Capabilities struct {
+ // Added capabilities
+ // +optional
+ Add []Capability `json:"add,omitempty"`
+ // Removed capabilities
+ // +optional
+ Drop []Capability `json:"drop,omitempty"`
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+ // Limits describes the maximum amount of compute resources allowed.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ // +optional
+ Limits ResourceList `json:"limits,omitempty"`
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ // +optional
+ Requests ResourceList `json:"requests,omitempty"`
+}
+
+const (
+ // TerminationMessagePathDefault means the default path to capture the application termination message running in a container
+ TerminationMessagePathDefault string = "/dev/termination-log"
+)
+
+// A single application container that you want to run within a pod.
+type Container struct {
+ // Name of the container specified as a DNS_LABEL.
+ // Each container in a pod must have a unique name (DNS_LABEL).
+ // Cannot be updated.
+ Name string `json:"name"`
+ // Docker image name.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ // This field is optional to allow higher level config management to default or override
+ // container images in workload controllers like Deployments and StatefulSets.
+ // +optional
+ Image string `json:"image,omitempty"`
+ // Entrypoint array. Not executed within a shell.
+ // The docker image's ENTRYPOINT is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
+ // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ // +optional
+ Command []string `json:"command,omitempty"`
+ // Arguments to the entrypoint.
+ // The docker image's CMD is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
+ // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ // +optional
+ Args []string `json:"args,omitempty"`
+ // Container's working directory.
+ // If not specified, the container runtime's default will be used, which
+ // might be configured in the container image.
+ // Cannot be updated.
+ // +optional
+ WorkingDir string `json:"workingDir,omitempty"`
+ // List of ports to expose from the container. Exposing a port here gives
+ // the system additional information about the network connections a
+ // container uses, but is primarily informational. Not specifying a port here
+ // DOES NOT prevent that port from being exposed. Any port which is
+ // listening on the default "0.0.0.0" address inside a container will be
+ // accessible from the network.
+ // Cannot be updated.
+ // +optional
+ // +patchMergeKey=containerPort
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=containerPort
+ // +listMapKey=protocol
+ Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort"`
+ // List of sources to populate environment variables in the container.
+ // Each key defined within a source must be a C_IDENTIFIER. All invalid keys
+ // will be reported as an event when the container is starting. When a key exists in multiple
+ // sources, the value associated with the last source will take precedence.
+ // Values defined by an Env with a duplicate key will take precedence.
+ // Cannot be updated.
+ // +optional
+ EnvFrom []EnvFromSource `json:"envFrom,omitempty"`
+ // List of environment variables to set in the container.
+ // Cannot be updated.
+ // +optional
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+ // Compute Resources required by this container.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ // +optional
+ Resources ResourceRequirements `json:"resources,omitempty"`
+ // Pod volumes to mount into the container's filesystem.
+ // Cannot be updated.
+ // +optional
+ // +patchMergeKey=mountPath
+ // +patchStrategy=merge
+ VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath"`
+ // volumeDevices is the list of block devices to be used by the container.
+ // +patchMergeKey=devicePath
+ // +patchStrategy=merge
+ // +optional
+ VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath"`
+ // Periodic probe of container liveness.
+ // Container will be restarted if the probe fails.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ LivenessProbe *Probe `json:"livenessProbe,omitempty"`
+ // Periodic probe of container service readiness.
+ // Container will be removed from service endpoints if the probe fails.
+ // Cannot be updated.
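+ //
+ // A hedged example (editor's sketch, values illustrative) of an HTTP
+ // readiness probe using the Probe and Handler types defined in this file:
+ //
+ //     rp := &Probe{
+ //             Handler:          Handler{HTTPGet: &HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)}},
+ //             PeriodSeconds:    10,
+ //             FailureThreshold: 3,
+ //     }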
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
+ // StartupProbe indicates that the Pod has successfully initialized.
+ // If specified, no other probes are executed until this completes successfully.
+ // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ // when it might take a long time to load data or warm a cache, than during steady-state operation.
+ // This cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ // +optional
+ StartupProbe *Probe `json:"startupProbe,omitempty"`
+ // Actions that the management system should take in response to container lifecycle events.
+ // Cannot be updated.
+ // +optional
+ Lifecycle *Lifecycle `json:"lifecycle,omitempty"`
+ // Optional: Path at which the file to which the container's termination message
+ // will be written is mounted into the container's filesystem.
+ // The message written is intended to be a brief final status, such as an assertion failure message.
+ // Will be truncated by the node if greater than 4096 bytes. The total message length across
+ // all containers will be limited to 12kb.
+ // Defaults to /dev/termination-log.
+ // Cannot be updated.
+ // +optional
+ TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
+ // Indicates how the termination message should be populated. File will use the contents of
+ // terminationMessagePath to populate the container status message on both success and failure.
+ // FallbackToLogsOnError will use the last chunk of container log output if the termination
+ // message file is empty and the container exited with an error.
+ // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ // Defaults to File.
+ // Cannot be updated.
+ // +optional
+ TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"`
+ // Image pull policy.
+ // One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ // +optional
+ ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty"`
+ // SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ // +optional
+ SecurityContext *SecurityContext `json:"securityContext,omitempty"`
+
+ // Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+ // and shouldn't be used for general purpose containers.
+
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this
+ // is not set, reads from stdin in the container will always result in EOF.
+ // Default is false.
+ // +optional
+ Stdin bool `json:"stdin,omitempty"`
+ // Whether the container runtime should close the stdin channel after it has been opened by
+ // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ // at which time stdin is closed and remains closed until the container is restarted. If this
+ // flag is false, a container process that reads from stdin will never receive an EOF.
+ // Default is false.
+ // +optional
+ StdinOnce bool `json:"stdinOnce,omitempty"`
+ // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ // Default is false.
+ // +optional
+ TTY bool `json:"tty,omitempty"`
+}
+
+// Handler defines a specific action that should be taken
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+ // One and only one of the following should be specified.
+ // Exec specifies the action to take.
+ // +optional
+ Exec *ExecAction `json:"exec,omitempty"`
+ // HTTPGet specifies the http request to perform.
+ // +optional
+ HTTPGet *HTTPGetAction `json:"httpGet,omitempty"`
+ // TCPSocket specifies an action involving a TCP port.
+ // TCP hooks not yet supported
+ // TODO: implement a realistic TCP lifecycle hook
+ // +optional
+ TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty"`
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+ // PostStart is called immediately after a container is created. If the handler fails,
+ // the container is terminated and restarted according to its restart policy.
+ // Other management of the container blocks until the hook completes.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ // +optional
+ PostStart *Handler `json:"postStart,omitempty"`
+ // PreStop is called immediately before a container is terminated due to an
+ // API request or management event such as liveness/startup probe failure,
+ // preemption, resource contention, etc. The handler is not called if the
+ // container crashes or exits. The reason for termination is passed to the
+ // handler. The Pod's termination grace period countdown begins before the
+ // PreStop hook is executed. Regardless of the outcome of the handler, the
+ // container will eventually terminate within the Pod's termination grace
+ // period. Other management of the container blocks until the hook completes
+ // or until the termination grace period is reached.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ // +optional
+ PreStop *Handler `json:"preStop,omitempty"`
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ContainerStateWaiting is a waiting state of a container.
+type ContainerStateWaiting struct {
+ // (brief) reason the container is not yet running.
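+ // (Illustrative, commonly observed values rather than an API contract:
+ // "ContainerCreating", "CrashLoopBackOff", "ImagePullBackOff".)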
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // Message regarding why the container is not yet running.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// ContainerStateRunning is a running state of a container.
+type ContainerStateRunning struct {
+ // Time at which the container was last (re-)started
+ // +optional
+ StartedAt metav1.Time `json:"startedAt,omitempty"`
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+type ContainerStateTerminated struct {
+ // Exit status from the last termination of the container
+ ExitCode int32 `json:"exitCode"`
+ // Signal from the last termination of the container
+ // +optional
+ Signal int32 `json:"signal,omitempty"`
+ // (brief) reason from the last termination of the container
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // Message regarding the last termination of the container
+ // +optional
+ Message string `json:"message,omitempty"`
+ // Time at which previous execution of the container started
+ // +optional
+ StartedAt metav1.Time `json:"startedAt,omitempty"`
+ // Time at which the container last terminated
+ // +optional
+ FinishedAt metav1.Time `json:"finishedAt,omitempty"`
+ // Container's ID in the format 'docker://<container_id>'
+ // +optional
+ ContainerID string `json:"containerID,omitempty"`
+}
+
+// ContainerState holds a possible state of a container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+ // Details about a waiting container
+ // +optional
+ Waiting *ContainerStateWaiting `json:"waiting,omitempty"`
+ // Details about a running container
+ // +optional
+ Running *ContainerStateRunning `json:"running,omitempty"`
+ // Details about a terminated container
+ // +optional
+ Terminated *ContainerStateTerminated `json:"terminated,omitempty"`
+}
+
+// ContainerStatus contains details for the current status of this container.
+type ContainerStatus struct {
+ // This must be a DNS_LABEL. Each container in a pod must have a unique name.
+ // Cannot be updated.
+ Name string `json:"name"`
+ // Details about the container's current condition.
+ // +optional
+ State ContainerState `json:"state,omitempty"`
+ // Details about the container's last termination condition.
+ // +optional
+ LastTerminationState ContainerState `json:"lastState,omitempty"`
+ // Specifies whether the container has passed its readiness probe.
+ Ready bool `json:"ready"`
+ // The number of times the container has been restarted, currently based on
+ // the number of dead containers that have not yet been removed.
+ // Note that this is calculated from dead containers. But those containers are subject to
+ // garbage collection. This value will get capped at 5 by GC.
+ RestartCount int32 `json:"restartCount"`
+ // The image the container is running.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ // TODO(dchen1107): Which image the container is running with?
+ Image string `json:"image"`
+ // ImageID of the container's image.
+ ImageID string `json:"imageID"`
+ // Container's ID in the format 'docker://<container_id>'.
+ // +optional
+ ContainerID string `json:"containerID,omitempty"`
+ // Specifies whether the container has passed its startup probe.
+ // Initialized as false, becomes true after startupProbe is considered successful.
+ // Resets to false when the container is restarted, or if kubelet loses state temporarily.
+ // Is always true when no startupProbe is defined.
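+ //
+ // Sketch (editor's illustration): a caller would typically treat startup as
+ // complete when the field is nil or true, assuming `cs` is a ContainerStatus:
+ //
+ //     startedUp := cs.Started == nil || *cs.Started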
+ // +optional
+ Started *bool `json:"started,omitempty"`
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+ // PodPending means the pod has been accepted by the system, but one or more of the containers
+ // has not been started. This includes time before being bound to a node, as well as time spent
+ // pulling images onto the host.
+ PodPending PodPhase = "Pending"
+ // PodRunning means the pod has been bound to a node and all of the containers have been started.
+ // At least one container is still running or is in the process of being restarted.
+ PodRunning PodPhase = "Running"
+ // PodSucceeded means that all containers in the pod have voluntarily terminated
+ // with a container exit code of 0, and the system is not going to restart any of these containers.
+ PodSucceeded PodPhase = "Succeeded"
+ // PodFailed means that all containers in the pod have terminated, and at least one container has
+ // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+ PodFailed PodPhase = "Failed"
+ // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+ // to an error in communicating with the host of the pod.
+ // Deprecated: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095)
+ PodUnknown PodPhase = "Unknown"
+)
+
+// PodConditionType is a valid value for PodCondition.Type
+type PodConditionType string
+
+// These are valid conditions of a pod.
+const (
+ // ContainersReady indicates whether all containers in the pod are ready.
+ ContainersReady PodConditionType = "ContainersReady"
+ // PodInitialized means that all init containers in the pod have started successfully.
+ PodInitialized PodConditionType = "Initialized"
+ // PodReady means the pod is able to service requests and should be added to the
+ // load balancing pools of all matching services.
+ PodReady PodConditionType = "Ready"
+ // PodScheduled represents status of the scheduling process for this pod.
+ PodScheduled PodConditionType = "PodScheduled"
+)
+
+// These are reasons for a pod's transition to a condition.
+const (
+ // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
+ // can't schedule the pod right now, for example due to insufficient resources in the cluster.
+ PodReasonUnschedulable = "Unschedulable"
+)
+
+// PodCondition contains details for the current condition of this pod.
+type PodCondition struct {
+ // Type is the type of the condition.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+ Type PodConditionType `json:"type"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+ Status ConditionStatus `json:"status"`
+ // Last time we probed the condition.
+ // +optional
+ LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // Human-readable message indicating details about the last transition.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+ RestartPolicyAlways RestartPolicy = "Always"
+ RestartPolicyOnFailure RestartPolicy = "OnFailure"
+ RestartPolicyNever RestartPolicy = "Never"
+)
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+ // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+ // first, if it is available, then fall back on the default
+ // (as determined by kubelet) DNS settings.
+ DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first unless hostNetwork is true, if it is available, then
+ // fall back on the default (as determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+
+ // DNSNone indicates that the pod should use empty DNS settings. DNS
+ // parameters such as nameservers and search paths should be defined via
+ // DNSConfig.
+ DNSNone DNSPolicy = "None"
+)
+
+const (
+ // DefaultTerminationGracePeriodSeconds indicates the default duration in
+ // seconds a pod needs to terminate gracefully.
+ DefaultTerminationGracePeriodSeconds = 30
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+// +structType=atomic
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects. Its requirements are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+// +structType=atomic
+type NodeSelectorTerm struct {
+ // A list of node selector requirements by node's labels.
+ // +optional
+ MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty"`
+ // A list of node selector requirements by node's fields.
+ // +optional
+ MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string `json:"key"`
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator `json:"operator"`
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ Values []string `json:"values,omitempty"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn           NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
+)
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Their requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+// +structType=atomic
+type TopologySelectorTerm struct {
+	// Usage: Fields of type []TopologySelectorTerm must be listType=atomic.
+
+	// A list of topology selector requirements by labels.
+	// +optional
+	MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty"`
+}
+
+// A topology selector requirement is a selector that matches the given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key"`
+	// An array of string values. One value must match the label to be selected.
+	// Each entry in Values is ORed.
+	Values []string `json:"values"`
+}
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+	// Describes node affinity scheduling rules for the pod.
+	// +optional
+	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"`
+	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAffinity *PodAffinity `json:"podAffinity,omitempty"`
+	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"`
+}
+
+// Pod affinity is a group of inter-pod affinity scheduling rules.
+type PodAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
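As an illustrative aside (not part of the vendored file), here is a minimal sketch of how the node-selector types above compose: the terms of a NodeSelector are ORed, while the requirements inside a single term are ANDed. The well-known kubernetes.io label keys are real; the "gpu" key is a hypothetical example.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Matches nodes that are (linux AND amd64), OR nodes that carry a "gpu" label.
	selector := v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{
			{MatchExpressions: []v1.NodeSelectorRequirement{
				{Key: "kubernetes.io/os", Operator: v1.NodeSelectorOpIn, Values: []string{"linux"}},
				{Key: "kubernetes.io/arch", Operator: v1.NodeSelectorOpIn, Values: []string{"amd64"}},
			}},
			{MatchExpressions: []v1.NodeSelectorRequirement{
				// "gpu" is a hypothetical label key; Exists requires an empty Values array.
				{Key: "gpu", Operator: v1.NodeSelectorOpExists},
			}},
		},
	}
	fmt.Printf("selector with %d ORed terms\n", len(selector.NodeSelectorTerms))
}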
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// Pod anti-affinity is a group of inter-pod anti-affinity scheduling rules.
+type PodAntiAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the anti-affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling anti-affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+type WeightedPodAffinityTerm struct {
+	// weight associated with matching the corresponding podAffinityTerm,
+	// in the range 1-100.
+	Weight int32 `json:"weight"`
+	// Required. A pod affinity term, associated with the corresponding weight.
+	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+type PodAffinityTerm struct {
+	// A label query over a set of resources, in this case pods.
+	// +optional
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
+	// namespaces specifies a static list of namespace names that the term applies to.
+	// The term is applied to the union of the namespaces listed in this field
+	// and the ones selected by namespaceSelector.
+	// null or empty namespaces list and null namespaceSelector means "this pod's namespace"
+	// +optional
+	Namespaces []string `json:"namespaces,omitempty"`
+	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
+	// whose value of the label with key topologyKey matches that of any node on which any of the
+	// selected pods is running.
+	// Empty topologyKey is not allowed.
+	TopologyKey string `json:"topologyKey"`
+	// A label query over the set of namespaces that the term applies to.
+	// The term is applied to the union of the namespaces selected by this field
+	// and the ones listed in the namespaces field.
+	// null selector and null or empty namespaces list means "this pod's namespace".
+	// An empty selector ({}) matches all namespaces.
+	// This field is beta-level and is only honored when the PodAffinityNamespaceSelector feature is enabled.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// will try to eventually evict the pod from its node.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// may or may not try to eventually evict the pod from its node.
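As another illustrative aside (not part of the vendored file), a sketch of how PodAffinityTerm and WeightedPodAffinityTerm fit together, here expressing a soft preference to spread pods of a hypothetical app=web workload across nodes:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// preferSpread returns an Affinity that asks the scheduler to avoid, but not
// forbid, co-locating pods labelled app=web on the same node.
func preferSpread() *v1.Affinity {
	return &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
				Weight: 100, // must be in the range 1-100; summed per node
				PodAffinityTerm: v1.PodAffinityTerm{
					LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
					TopologyKey:   "kubernetes.io/hostname", // node-level granularity
				},
			}},
		},
	}
}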
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node matches the corresponding matchExpressions; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+	Weight int32 `json:"weight"`
+	// A node selector term, associated with the corresponding weight.
+	Preference NodeSelectorTerm `json:"preference"`
+}
+
+// PodReadinessGate contains the reference to a pod condition
+type PodReadinessGate struct {
+	// ConditionType refers to a condition in the pod's condition list with matching type.
+	ConditionType PodConditionType `json:"conditionType"`
+}
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+	// List of volumes that can be mounted by containers belonging to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name"`
+	// List of initialization containers belonging to the pod.
+	// Init containers are executed in order prior to containers being started. If any
+	// init container fails, the pod is considered to have failed and is handled according
+	// to its restartPolicy. The name for an init container or normal container must be
+	// unique among all containers.
+	// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+	// The resourceRequirements of an init container are taken into account during scheduling
+	// by finding the highest request/limit for each resource type, and then using the max of
+	// that value or the sum of the normal containers. Limits are applied to init containers
+	// in a similar fashion.
+	// Init containers cannot currently be added or removed.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+	// List of containers belonging to the pod.
+	// Containers cannot currently be added or removed.
+	// There must be at least one container in a Pod.
+	// Cannot be updated.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
+	// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+	// pod to perform user-initiated actions such as debugging. This list cannot be specified when
+	// creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+	// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+	// This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+	// Restart policy for all containers within the pod.
+	// One of Always, OnFailure, Never.
+	// Defaults to Always.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+	// +optional
+	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
+	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+	// Value must be a non-negative integer. The value zero indicates stop immediately via
+	// the kill signal (no opportunity to shut down).
+	// If this value is nil, the default grace period will be used instead.
+	// The grace period is the duration in seconds after the processes running in the pod are sent
+	// a termination signal and the time when the processes are forcibly halted with a kill signal.
+	// Set this value longer than the expected cleanup time for your process.
+	// Defaults to 30 seconds.
+	// +optional
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+	// Optional duration in seconds the pod may be active on the node relative to
+	// StartTime before the system will actively try to mark it failed and kill associated containers.
+	// Value must be a positive integer.
+	// +optional
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+	// Set DNS policy for the pod.
+	// Defaults to "ClusterFirst".
+	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+	// To have DNS options set along with hostNetwork, you have to specify DNS policy
+	// explicitly to 'ClusterFirstWithHostNet'.
+	// +optional
+	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty"`
+	// NodeSelector is a selector which must be true for the pod to fit on a node.
+	// Selector which must match a node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	// +mapType=atomic
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+	// Deprecated: Use serviceAccountName instead.
+	// +k8s:conversion-gen=false
+	// +optional
+	DeprecatedServiceAccount string `json:"serviceAccount,omitempty"`
+	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+
+	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
+	// requirements.
+	// +optional
+	NodeName string `json:"nodeName,omitempty"`
+	// Host networking requested for this pod. Use the host's network namespace.
+	// If this option is set, the ports that will be used must be specified.
+	// Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostNetwork bool `json:"hostNetwork,omitempty"`
+	// Use the host's pid namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostPID bool `json:"hostPID,omitempty"`
+	// Use the host's ipc namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostIPC bool `json:"hostIPC,omitempty"`
+	// Share a single process namespace between all of the containers in a pod.
+	// When this is set containers will be able to view and signal processes from other containers
+	// in the same pod, and the first process in each container will not be assigned PID 1.
+	// HostPID and ShareProcessNamespace cannot both be set.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty"`
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// Optional: Defaults to empty. See type description for default values of each field.
+	// +optional
+	SecurityContext *PodSecurityContext `json:"securityContext,omitempty"`
+	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+	// in the case of docker, only DockerConfig type secrets are honored.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+	// Specifies the hostname of the Pod
+	// If not specified, the pod's hostname will be set to a system-defined value.
+	// +optional
+	Hostname string `json:"hostname,omitempty"`
+	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+	// If not specified, the pod will not have a domain name at all.
+	// +optional
+	Subdomain string `json:"subdomain,omitempty"`
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *Affinity `json:"affinity,omitempty"`
+	// If specified, the pod will be dispatched by specified scheduler.
+	// If not specified, the pod will be dispatched by default scheduler.
+	// +optional
+	SchedulerName string `json:"schedulerName,omitempty"`
+	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+	// file if specified. This is only valid for non-hostNetwork pods.
+	// +optional
+	// +patchMergeKey=ip
+	// +patchStrategy=merge
+	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip"`
+	// If specified, indicates the pod's priority. "system-node-critical" and
+	// "system-cluster-critical" are two special keywords which indicate the
+	// highest priorities with the former being the highest priority. Any other
+	// name must be defined by creating a PriorityClass object with that name.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+	// The priority value. Various system components use this field to find the
+	// priority of the pod. When Priority Admission Controller is enabled, it
+	// prevents users from setting this field. The admission controller populates
+	// this field from PriorityClassName.
+	// The higher the value, the higher the priority.
+	// +optional
+	Priority *int32 `json:"priority,omitempty"`
+	// Specifies the DNS parameters of a pod.
+	// Parameters specified here will be merged to the generated DNS
+	// configuration based on DNSPolicy.
+	// +optional
+	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"`
+	// If specified, all readiness gates will be evaluated for pod readiness.
+	// A pod is ready when all its containers are ready AND
+	// all conditions specified in the readiness gates have status equal to "True"
+	// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+	// +optional
+	ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty"`
+	// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+	// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+	// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+	// empty definition that uses the default runtime handler.
+	// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+	// This is a beta feature as of Kubernetes v1.14.
+	// +optional
+	RuntimeClassName *string `json:"runtimeClassName,omitempty"`
+	// EnableServiceLinks indicates whether information about services should be injected into pod's
+	// environment variables, matching the syntax of Docker links.
+	// Optional: Defaults to true.
+	// +optional
+	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"`
+	// PreemptionPolicy is the Policy for preempting pods with lower priority.
+	// One of Never, PreemptLowerPriority.
+	// Defaults to PreemptLowerPriority if unset.
+	// This field is beta-level, gated by the NonPreemptingPriority feature-gate.
+	// +optional
+	PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty"`
+	// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+	// This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+	// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+	// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+	// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+	// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+	// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+	// This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
+	// +optional
+	Overhead ResourceList `json:"overhead,omitempty"`
+	// TopologySpreadConstraints describes how a group of pods ought to spread across topology
+	// domains. Scheduler will schedule pods in a way which abides by the constraints.
+	// All topologySpreadConstraints are ANDed.
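As an illustrative aside (not part of the vendored file), a minimal sketch of a PodSpec exercising the fields discussed above; the image name is a hypothetical placeholder.

package main

import v1 "k8s.io/api/core/v1"

// minimalSpec sketches the most commonly set PodSpec fields.
func minimalSpec() v1.PodSpec {
	grace := int64(10) // overrides the 30-second default grace period
	return v1.PodSpec{
		Containers: []v1.Container{
			{Name: "main", Image: "registry.example.com/app:1.0"},
		},
		RestartPolicy:                 v1.RestartPolicyOnFailure,
		DNSPolicy:                     v1.DNSClusterFirst,
		TerminationGracePeriodSeconds: &grace,
		NodeSelector:                  map[string]string{"kubernetes.io/os": "linux"},
	}
}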
+	// +optional
+	// +patchMergeKey=topologyKey
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=topologyKey
+	// +listMapKey=whenUnsatisfiable
+	TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey"`
+	// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+	// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+	// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+	// If a pod does not have FQDN, this has no effect.
+	// Default to false.
+	// +optional
+	SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"`
+}
+
+type UnsatisfiableConstraintAction string
+
+const (
+	// DoNotSchedule instructs the scheduler not to schedule the pod
+	// when constraints are not satisfied.
+	DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule"
+	// ScheduleAnyway instructs the scheduler to schedule the pod
+	// even if constraints are not satisfied.
+	ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
+)
+
+// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+type TopologySpreadConstraint struct {
+	// MaxSkew describes the degree to which pods may be unevenly distributed.
+	// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+	// between the number of matching pods in the target topology and the global minimum.
+	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+	// labelSelector spread as 1/1/0:
+	// +-------+-------+-------+
+	// | zone1 | zone2 | zone3 |
+	// +-------+-------+-------+
+	// |   P   |   P   |       |
+	// +-------+-------+-------+
+	// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;
+	// scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
+	// violate MaxSkew(1).
+	// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+	// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+	// to topologies that satisfy it.
+	// It's a required field. Default value is 1 and 0 is not allowed.
+	MaxSkew int32 `json:"maxSkew"`
+	// TopologyKey is the key of node labels. Nodes that have a label with this key
+	// and identical values are considered to be in the same topology.
+	// We consider each <key, value> as a "bucket", and try to put balanced number
+	// of pods into each bucket.
+	// It's a required field.
+	TopologyKey string `json:"topologyKey"`
+	// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+	// the spread constraint.
+	// - DoNotSchedule (default) tells the scheduler not to schedule it.
+	// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+	//   but giving higher precedence to topologies that would help reduce the
+	//   skew.
+	// A constraint is considered "Unsatisfiable" for an incoming pod
+	// if and only if every possible node assignment for that pod would violate
+	// "MaxSkew" on some topology.
+	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+	// labelSelector spread as 3/1/1:
+	// +-------+-------+-------+
+	// | zone1 | zone2 | zone3 |
+	// +-------+-------+-------+
+	// | P P P |   P   |   P   |
+	// +-------+-------+-------+
+	// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+	// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+	// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+	// won't make it *more* imbalanced.
+	// It's a required field.
+	WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable"`
+	// LabelSelector is used to find matching pods.
+	// Pods that match this label selector are counted to determine the number of pods
+	// in their corresponding topology domain.
+	// +optional
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
+}
+
+const (
+	// The default value for enableServiceLinks attribute.
+	DefaultEnableServiceLinks = true
+)
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+type HostAlias struct {
+	// IP address of the host file entry.
+	IP string `json:"ip,omitempty"`
+	// Hostnames for the above IP address.
+	Hostnames []string `json:"hostnames,omitempty"`
+}
+
+// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
+// when volume is mounted.
+type PodFSGroupChangePolicy string
+
+const (
+	// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
+	// only when permission and ownership of root directory does not match with expected
+	// permissions on the volume. This can help shorten the time it takes to change
+	// ownership and permissions of a volume.
+	FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
+	// FSGroupChangeAlways indicates that volume's ownership and permissions
+	// should always be changed whenever volume is mounted inside a Pod. This is the default
+	// behavior.
+	FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
+)
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+	// The SELinux context to be applied to all containers.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container. May also be set in SecurityContext. If set in
+	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+	// takes precedence for that container.
+	// +optional
+	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"`
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in SecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// +optional
+	RunAsUser *int64 `json:"runAsUser,omitempty"`
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in SecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
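As an illustrative aside (not part of the vendored file), the 3-zone example from the comments above expressed in code: pods carrying a hypothetical app=web label may differ by at most one replica per zone.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zoneSpread builds a hard spreading constraint over the standard zone label.
func zoneSpread() v1.TopologySpreadConstraint {
	return v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule, // hard constraint; ScheduleAnyway would soften it
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
	}
}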
+	// +optional
+	RunAsGroup *int64 `json:"runAsGroup,omitempty"`
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in SecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
+	// A list of groups applied to the first process run in each container, in addition
+	// to the container's primary GID. If unspecified, no groups will be added to
+	// any container.
+	// +optional
+	SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
+	// A special supplemental group that applies to all containers in a pod.
+	// Some volume types allow the Kubelet to change the ownership of that volume
+	// to be owned by the pod:
+	//
+	// 1. The owning GID will be the FSGroup
+	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+	// 3. The permission bits are OR'd with rw-rw----
+	//
+	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
+	// +optional
+	FSGroup *int64 `json:"fsGroup,omitempty"`
+	// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+	// sysctls (by the container runtime) might fail to launch.
+	// +optional
+	Sysctls []Sysctl `json:"sysctls,omitempty"`
+	// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+	// before being exposed inside Pod. This field will only apply to
+	// volume types which support fsGroup based ownership (and permissions).
+	// It will have no effect on ephemeral volume types such as: secret, configmaps
+	// and emptydir.
+	// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+	// +optional
+	FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"`
+	// The seccomp options to use by the containers in this pod.
+	// +optional
+	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty"`
+}
+
+// SeccompProfile defines a pod/container's seccomp profile settings.
+// Only one profile source may be set.
+// +union
+type SeccompProfile struct {
+	// type indicates which kind of seccomp profile will be applied.
+	// Valid options are:
+	//
+	// Localhost - a profile defined in a file on the node should be used.
+	// RuntimeDefault - the container runtime default profile should be used.
+	// Unconfined - no profile should be applied.
+	// +unionDiscriminator
+	Type SeccompProfileType `json:"type"`
+	// localhostProfile indicates a profile defined in a file on the node should be used.
+	// The profile must be preconfigured on the node to work.
+	// Must be a descending path, relative to the kubelet's configured seccomp profile location.
+	// Must only be set if type is "Localhost".
+	// +optional
+	LocalhostProfile *string `json:"localhostProfile,omitempty"`
+}
+
+// SeccompProfileType defines the supported seccomp profile types.
+type SeccompProfileType string
+
+const (
+	// SeccompProfileTypeUnconfined indicates no seccomp profile is applied (A.K.A. unconfined).
+	SeccompProfileTypeUnconfined SeccompProfileType = "Unconfined"
+	// SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile.
+	SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault"
+	// SeccompProfileTypeLocalhost indicates a profile defined in a file on the node should be used.
+	// The file's location is based off the kubelet's deprecated flag --seccomp-profile-root.
+	// Once the flag support is removed the location will be <kubelet-root-dir>/seccomp.
+	SeccompProfileTypeLocalhost SeccompProfileType = "Localhost"
+)
+
+// PodQOSClass defines the supported qos classes of Pods.
+type PodQOSClass string
+
+const (
+	// PodQOSGuaranteed is the Guaranteed qos class.
+	PodQOSGuaranteed PodQOSClass = "Guaranteed"
+	// PodQOSBurstable is the Burstable qos class.
+	PodQOSBurstable PodQOSClass = "Burstable"
+	// PodQOSBestEffort is the BestEffort qos class.
+	PodQOSBestEffort PodQOSClass = "BestEffort"
+)
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+type PodDNSConfig struct {
+	// A list of DNS name server IP addresses.
+	// This will be appended to the base nameservers generated from DNSPolicy.
+	// Duplicated nameservers will be removed.
+	// +optional
+	Nameservers []string `json:"nameservers,omitempty"`
+	// A list of DNS search domains for host-name lookup.
+	// This will be appended to the base search paths generated from DNSPolicy.
+	// Duplicated search paths will be removed.
+	// +optional
+	Searches []string `json:"searches,omitempty"`
+	// A list of DNS resolver options.
+	// This will be merged with the base options generated from DNSPolicy.
+	// Duplicated entries will be removed. Resolution options given in Options
+	// will override those that appear in the base DNSPolicy.
+	// +optional
+	Options []PodDNSConfigOption `json:"options,omitempty"`
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+	// Required.
+	Name string `json:"name,omitempty"`
+	// +optional
+	Value *string `json:"value,omitempty"`
+}
+
+// IP address information for entries in the (plural) PodIPs field.
+// Each entry includes:
+//    IP: An IP address allocated to the pod. Routable at least within the cluster.
+type PodIP struct {
+	// ip is an IP address (IPv4 or IPv6) assigned to the pod
+	IP string `json:"ip,omitempty"`
+}
+
+// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
+// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer
+// to Container and allows separate documentation for the fields of EphemeralContainer.
+// When a new field is added to Container it must be added here as well.
+type EphemeralContainerCommon struct {
+	// Name of the ephemeral container specified as a DNS_LABEL.
+	// This name must be unique among all containers, init containers and ephemeral containers.
+	Name string `json:"name"`
+	// Docker image name.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	Image string `json:"image,omitempty"`
+	// Entrypoint array. Not executed within a shell.
+	// The docker image's ENTRYPOINT is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
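As an illustrative aside (not part of the vendored file), a sketch of pairing DNSNone with an explicit PodDNSConfig, as the comments above describe; the resolver address and search domain are hypothetical.

package main

import v1 "k8s.io/api/core/v1"

// customDNS ignores the kubelet-generated DNS settings and supplies its own.
func customDNS() v1.PodSpec {
	ndots := "2"
	return v1.PodSpec{
		DNSPolicy: v1.DNSNone, // all DNS parameters must come from DNSConfig
		DNSConfig: &v1.PodDNSConfig{
			Nameservers: []string{"10.0.0.10"},
			Searches:    []string{"svc.cluster.local"},
			Options:     []v1.PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
		},
	}
}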
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	Command []string `json:"command,omitempty"`
+	// Arguments to the entrypoint.
+	// The docker image's CMD is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	Args []string `json:"args,omitempty"`
+	// Container's working directory.
+	// If not specified, the container runtime's default will be used, which
+	// might be configured in the container image.
+	// Cannot be updated.
+	// +optional
+	WorkingDir string `json:"workingDir,omitempty"`
+	// Ports are not allowed for ephemeral containers.
+	Ports []ContainerPort `json:"ports,omitempty"`
+	// List of sources to populate environment variables in the container.
+	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	EnvFrom []EnvFromSource `json:"envFrom,omitempty"`
+	// List of environment variables to set in the container.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+	// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+	// already allocated to the pod.
+	// +optional
+	Resources ResourceRequirements `json:"resources,omitempty"`
+	// Pod volumes to mount into the container's filesystem.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=mountPath
+	// +patchStrategy=merge
+	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath"`
+	// volumeDevices is the list of block devices to be used by the container.
+	// +patchMergeKey=devicePath
+	// +patchStrategy=merge
+	// +optional
+	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath"`
+	// Probes are not allowed for ephemeral containers.
+	// +optional
+	LivenessProbe *Probe `json:"livenessProbe,omitempty"`
+	// Probes are not allowed for ephemeral containers.
+	// +optional
+	ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
+	// Probes are not allowed for ephemeral containers.
+	// +optional
+	StartupProbe *Probe `json:"startupProbe,omitempty"`
+	// Lifecycle is not allowed for ephemeral containers.
+	// +optional
+	Lifecycle *Lifecycle `json:"lifecycle,omitempty"`
+	// Optional: Path at which the file to which the container's termination message
+	// will be written is mounted into the container's filesystem.
+	// Message written is intended to be brief final status, such as an assertion failure message.
+	// Will be truncated by the node if greater than 4096 bytes. The total message length across
+	// all containers will be limited to 12kb.
+	// Defaults to /dev/termination-log.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
+	// Indicate how the termination message should be populated. File will use the contents of
+	// terminationMessagePath to populate the container status message on both success and failure.
+	// FallbackToLogsOnError will use the last chunk of container log output if the termination
+	// message file is empty and the container exited with an error.
+	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+	// Defaults to File.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"`
+	// Image pull policy.
+	// One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +optional
+	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty"`
+	// Optional: SecurityContext defines the security options the ephemeral container should be run with.
+	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+	// +optional
+	SecurityContext *SecurityContext `json:"securityContext,omitempty"`
+
+	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+	// and shouldn't be used for general purpose containers.
+
+	// Whether this container should allocate a buffer for stdin in the container runtime. If this
+	// is not set, reads from stdin in the container will always result in EOF.
+	// Default is false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty"`
+	// Whether the container runtime should close the stdin channel after it has been opened by
+	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
+	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+	// at which time stdin is closed and remains closed until the container is restarted. If this
+	// flag is false, a container process that reads from stdin will never receive an EOF.
+	// Default is false
+	// +optional
+	StdinOnce bool `json:"stdinOnce,omitempty"`
+	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+	// Default is false.
+	// +optional
+	TTY bool `json:"tty,omitempty"`
+}
+
+// EphemeralContainerCommon converts to Container. All fields must be kept in sync between
+// these two types.
+var _ = Container(EphemeralContainerCommon{})
+
+// An EphemeralContainer is a container that may be added temporarily to an existing pod for
+// user-initiated activities such as debugging. Ephemeral containers have no resource or
+// scheduling guarantees, and they will not be restarted when they exit or when a pod is
+// removed or restarted. If an ephemeral container causes a pod to exceed its resource
+// allocation, the pod may be evicted.
+// Ephemeral containers may not be added by directly updating the pod spec. They must be added
+// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec
+// once added.
+// This is an alpha feature enabled by the EphemeralContainers feature flag.
+type EphemeralContainer struct {
+	// Ephemeral containers have all of the fields of Container, plus additional fields
+	// specific to ephemeral containers. Fields in common with Container are in the
+	// following inlined struct so that an EphemeralContainer may easily be converted
+	// to a Container.
+	EphemeralContainerCommon `json:",inline"`
+
+	// If set, the name of the container from PodSpec that this ephemeral container targets.
+	// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+	// If not set then the ephemeral container is run in whatever namespaces are shared
+	// for the pod. Note that the container runtime must support this feature.
+	// +optional
+	TargetContainerName string `json:"targetContainerName,omitempty"`
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
+type PodStatus struct {
+	// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+	// The conditions array, the reason and message fields, and the individual container status
+	// arrays contain more detail about the pod's status.
+	// There are five possible phase values:
+	//
+	// Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+	// container images has not been created. This includes time before being scheduled as
+	// well as time spent downloading images over the network, which could take a while.
+	// Running: The pod has been bound to a node, and all of the containers have been created.
+	// At least one container is still running, or is in the process of starting or restarting.
+	// Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+	// Failed: All containers in the pod have terminated, and at least one container has
+	// terminated in failure. The container either exited with non-zero status or was terminated
+	// by the system.
+	// Unknown: For some reason the state of the pod could not be obtained, typically due to an
+	// error in communicating with the host of the pod.
+	//
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
+	// +optional
+	Phase PodPhase `json:"phase,omitempty"`
+	// Current service state of pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+	// A human readable message indicating details about why the pod is in this condition.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// A brief CamelCase message indicating details about why the pod is in this state.
+	// e.g. 'Evicted'
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
+	// scheduled right away as preemption victims receive their graceful termination periods.
+	// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+	// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+	// give the resources on this node to a higher priority pod that is created after preemption.
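As an illustrative aside (not part of the vendored file), a sketch of an EphemeralContainer as described above. It would be submitted via the pod's ephemeralcontainers subresource, never by updating the pod spec directly; the image and target container name are hypothetical.

package main

import v1 "k8s.io/api/core/v1"

// debugContainer builds an interactive debugging container targeting "main".
func debugContainer() v1.EphemeralContainer {
	return v1.EphemeralContainer{
		EphemeralContainerCommon: v1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "registry.example.com/busybox:1.35",
			Command: []string{"sh"},
			Stdin:   true,
			TTY:     true,
		},
		TargetContainerName: "main", // share this container's namespaces
	}
}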
+	// As a result, this field may be different than PodSpec.nodeName when the pod is
+	// scheduled.
+	// +optional
+	NominatedNodeName string `json:"nominatedNodeName,omitempty"`
+
+	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+	// +optional
+	HostIP string `json:"hostIP,omitempty"`
+	// IP address allocated to the pod. Routable at least within the cluster.
+	// Empty if not yet allocated.
+	// +optional
+	PodIP string `json:"podIP,omitempty"`
+
+	// podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
+	// match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
+	// is empty if no IPs have been allocated yet.
+	// +optional
+	// +patchStrategy=merge
+	// +patchMergeKey=ip
+	PodIPs []PodIP `json:"podIPs,omitempty"`
+
+	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+	// This is before the Kubelet pulled the container image(s) for the pod.
+	// +optional
+	StartTime *metav1.Time `json:"startTime,omitempty"`
+
+	// The list has one entry per init container in the manifest. The most recent successful
+	// init container will have ready = true, the most recently started container will have
+	// startTime set.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty"`
+
+	// The list has one entry per container in the manifest. Each entry is currently the output
+	// of `docker inspect`.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+	// +optional
+	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty"`
+	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
+	// See PodQOSClass type for available QOS classes
+	// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
+	// +optional
+	QOSClass PodQOSClass `json:"qosClass,omitempty"`
+	// Status for any ephemeral containers that have run in this pod.
+	// This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.
+	// +optional
+	EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+type PodStatusResult struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Most recently observed status of the pod.
+	// This data may not be up to date.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status PodStatus `json:"status,omitempty"`
+}
+
+// +genclient
+// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+type Pod struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec PodSpec `json:"spec,omitempty"`
+
+	// Most recently observed status of the pod.
+	// This data may not be up to date.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status PodStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodList is a list of Pods.
+type PodList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	// List of pods.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
+	Items []Pod `json:"items"`
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec PodSpec `json:"spec,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Template defines the pods that will be created from this pod template.
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Template PodTemplateSpec `json:"template,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	// List of pod templates.
+	Items []PodTemplate `json:"items"`
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+type ReplicationControllerSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
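As an illustrative aside (not part of the vendored file), a sketch of a complete Pod object built from the types above; the name, namespace, and image are hypothetical placeholders.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePod composes TypeMeta, ObjectMeta, and PodSpec into one Pod value.
func examplePod() v1.Pod {
	return v1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "main", Image: "registry.example.com/app:1.0"},
			},
		},
	}
}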
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
+
+	// Selector is a label query over pods that should match the Replicas count.
+	// If Selector is empty, it is defaulted to the labels present on the Pod template.
+	// Label keys and values that must match in order to be controlled by this replication
+	// controller.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	// +mapType=atomic
+	Selector map[string]string `json:"selector,omitempty"`
+
+	// TemplateRef is a reference to an object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// +optional
+	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. This takes precedence over a TemplateRef.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template *PodTemplateSpec `json:"template,omitempty"`
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replication controller.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
+
+	// The number of ready replicas for this replication controller.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty"`
+
+	// ObservedGeneration reflects the generation of the most recently observed replication controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// Represents the latest available observations of a replication controller's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+type ReplicationControllerConditionType string
+
+// These are valid conditions of a replication controller.
+const (
+	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+	// etc. or deleted due to kubelet being down or finalizers are failing.
+	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+	// Type of replication controller condition.
+	Type ReplicationControllerConditionType `json:"type"`
+	// Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ReplicationControllerSpec `json:"spec,omitempty"` + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ReplicationControllerStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // List of replication controllers. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicationController `json:"items"` +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +const DefaultClientIPServiceAffinitySeconds int32 = 10800 + +// SessionAffinityConfig represents the configurations of session affinity. +type SessionAffinityConfig struct { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + ClientIP *ClientIPConfig `json:"clientIP,omitempty"` +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +type ClientIPConfig struct { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). 
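+ // An illustrative spec fragment keeping affinity for one hour (the value
+ // 3600 is assumed, not a default):
+ //   sessionAffinity: ClientIP
+ //   sessionAffinityConfig:
+ //     clientIP:
+ //       timeoutSeconds: 3600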
+ // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` +} + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// ServiceInternalTrafficPolicyType describes the type of traffic routing for +// internal traffic +type ServiceInternalTrafficPolicyType string + +const ( + // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + + // ServiceInternalTrafficPolicyLocal only routes to node-local + // endpoints, otherwise drops the traffic + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// These are the valid conditions of a service. +const ( + // LoadBalancerPortsError represents the condition of the requested ports + // on the cloud load balancer instance. + LoadBalancerPortsError = "LoadBalancerPortsError" +) + +// ServiceStatus represents the current status of a service. +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"` + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress `json:"ingress,omitempty"` +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. 
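+// An illustrative status as a cloud provider might publish it (the address
+// below is from a documentation range and is an assumption):
+//   status:
+//     loadBalancer:
+//       ingress:
+//       - ip: 203.0.113.10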
+type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string `json:"ip,omitempty"` + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string `json:"hostname,omitempty"` + + // Ports is a list of records of service ports + // If used, every port defined in the service should have an entry in it + // +listType=atomic + // +optional + Ports []PortStatus `json:"ports,omitempty"` +} + +// IPFamily represents the IP Family (IPv4 or IPv6). This type is used +// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). +type IPFamily string + +const ( + // IPv4Protocol indicates that this IP is IPv4 protocol + IPv4Protocol IPFamily = "IPv4" + // IPv6Protocol indicates that this IP is IPv6 protocol + IPv6Protocol IPFamily = "IPv6" +) + +// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +type IPFamilyPolicyType string + +const ( + // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. + // The IPFamily assigned is based on the default IPFamily used by the cluster + // or as identified by service.spec.ipFamilies field + IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack" + // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when + // the cluster is configured for dual-stack. If the cluster is not configured + // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not + // set in service.spec.ipFamilies then the service will be assigned the default IPFamily + // configured on the cluster + IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack" + // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using + // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The + // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If + // service.spec.ipFamilies was not provided then it will be assigned according to how they are + // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative + // IPFamily will be added by apiserver + IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack" +) + +// ServiceSpec describes the attributes that a user creates on a service. +type ServiceSpec struct { + // The list of ports that are exposed by this service. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge + // +listType=map + // +listMapKey=port + // +listMapKey=protocol + Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port"` + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + // +mapType=atomic + Selector map[string]string `json:"selector,omitempty"` + + // clusterIP is the IP address of the service and is usually assigned + // randomly. 
If an address is specified manually, is in-range (as per
+ // system configuration), and is not in use, it will be allocated to the
+ // service; otherwise creation of the service will fail. This field may not
+ // be changed through updates unless the type field is also being changed
+ // to ExternalName (which requires this field to be blank) or the type
+ // field is being changed from ExternalName (in which case this field may
+ // optionally be specified, as described above). Valid values are "None",
+ // empty string (""), or a valid IP address. Setting this to "None" makes a
+ // "headless service" (no virtual IP), which is useful when direct endpoint
+ // connections are preferred and proxying is not required. Only applies to
+ // types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ // when creating a Service of type ExternalName, creation will fail. This
+ // field will be wiped when updating a Service to type ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // +optional
+ ClusterIP string `json:"clusterIP,omitempty"`
+
+ // ClusterIPs is a list of IP addresses assigned to this service,
+ // usually assigned randomly. If an address is specified manually, is
+ // in-range (as per system configuration), and is not in use, it will be
+ // allocated to the service; otherwise creation of the service will fail.
+ // This field may not be changed through updates unless the type field is
+ // also being changed to ExternalName (which requires this field to be
+ // empty) or the type field is being changed from ExternalName (in which
+ // case this field may optionally be specified, as described above). Valid
+ // values are "None", empty string (""), or a valid IP address. Setting
+ // this to "None" makes a "headless service" (no virtual IP), which is
+ // useful when direct endpoint connections are preferred and proxying is
+ // not required. Only applies to types ClusterIP, NodePort, and
+ // LoadBalancer. If this field is specified when creating a Service of type
+ // ExternalName, creation will fail. This field will be wiped when updating
+ // a Service to type ExternalName. If this field is not specified, it will
+ // be initialized from the clusterIP field. If this field is specified,
+ // clients must ensure that clusterIPs[0] and clusterIP have the same
+ // value.
+ //
+ // Unless the "IPv6DualStack" feature gate is enabled, this field is
+ // limited to one value, which must be the same as the clusterIP field. If
+ // the feature gate is enabled, this field may hold a maximum of two
+ // entries (dual-stack IPs, in either order). These IPs must correspond to
+ // the values of the ipFamilies field. Both clusterIPs and ipFamilies are
+ // governed by the ipFamilyPolicy field.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // +listType=atomic
+ // +optional
+ ClusterIPs []string `json:"clusterIPs,omitempty"`
+
+ // type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ // "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ // to endpoints. Endpoints are determined by the selector or if that is not
+ // specified, by manual construction of an Endpoints object or
+ // EndpointSlice objects.
+ // If clusterIP is "None", no virtual IP is
+ // allocated and the endpoints are published as a set of endpoints rather
+ // than a virtual IP.
+ // "NodePort" builds on ClusterIP and allocates a port on every node which
+ // routes to the same endpoints as the clusterIP.
+ // "LoadBalancer" builds on NodePort and creates an external load-balancer
+ // (if supported in the current cloud) which routes to the same endpoints
+ // as the clusterIP.
+ // "ExternalName" aliases this service to the specified externalName.
+ // Several other fields do not apply to ExternalName services.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ // +optional
+ Type ServiceType `json:"type,omitempty"`
+
+ // externalIPs is a list of IP addresses for which nodes in the cluster
+ // will also accept traffic for this service. These IPs are not managed by
+ // Kubernetes. The user is responsible for ensuring that traffic arrives
+ // at a node with this IP. A common example is external load-balancers
+ // that are not part of the Kubernetes system.
+ // +optional
+ ExternalIPs []string `json:"externalIPs,omitempty"`
+
+ // Supports "ClientIP" and "None". Used to maintain session affinity.
+ // Defaults to None.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // +optional
+ SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
+
+ // Only applies to Service Type: LoadBalancer
+ // LoadBalancer will get created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
+
+ // If specified and supported by the platform, traffic through the cloud-provider
+ // load-balancer will be restricted to the specified client IPs. This field will be
+ // ignored if the cloud-provider does not support the feature.
+ // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ // +optional
+ LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
+
+ // externalName is the external reference that discovery mechanisms will
+ // return as an alias for this service (e.g. a DNS CNAME record). No
+ // proxying will be involved. Must be a lowercase RFC-1123 hostname
+ // (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ // +optional
+ ExternalName string `json:"externalName,omitempty"`
+
+ // externalTrafficPolicy denotes if this Service desires to route external
+ // traffic to node-local or cluster-wide endpoints. "Local" preserves the
+ // client source IP and avoids a second hop for LoadBalancer and NodePort
+ // type services, but risks potentially imbalanced traffic spreading.
+ // "Cluster" obscures the client source IP and may cause a second hop to
+ // another node, but should have good overall load-spreading.
+ // +optional
+ ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"`
+
+ // healthCheckNodePort specifies the healthcheck nodePort for the service.
+ // This only applies when type is set to LoadBalancer and
+ // externalTrafficPolicy is set to Local.
If a value is specified, is + // in-range, and is not in use, it will be used. If not specified, a value + // will be automatically allocated. External systems (e.g. load-balancers) + // can use this port to determine if a given node holds endpoints for this + // service or not. If this field is specified when creating a Service + // which does not need it, creation will fail. This field will be wiped + // when updating a Service to no longer need it (e.g. changing type). + // +optional + HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty"` + + // publishNotReadyAddresses indicates that any agent which deals with endpoints for this + // Service should disregard any indications of ready/not-ready. + // The primary use case for setting this field is for a StatefulSet's Headless Service to + // propagate SRV DNS records for its Pods for the purpose of peer discovery. + // The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + // Services interpret this to mean that all endpoints are considered "ready" even if the + // Pods themselves are not. Agents which consume only Kubernetes generated endpoints + // through the Endpoints or EndpointSlice resources can safely assume this behavior. + // +optional + PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty"` + + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty"` + + // TopologyKeys is tombstoned to show why 16 is reserved protobuf tag. + // TopologyKeys []string `json:"topologyKeys,omitempty"` + + // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. + // IPFamily *IPFamily `json:"ipFamily,omitempty"` + + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + // service, and is gated by the "IPv6DualStack" feature gate. This field + // is usually assigned automatically based on cluster configuration and the + // ipFamilyPolicy field. If this field is specified manually, the requested + // family is available in the cluster, and ipFamilyPolicy allows it, it + // will be used; otherwise creation of the service will fail. This field + // is conditionally mutable: it allows for adding or removing a secondary + // IP family, but it does not allow changing the primary IP family of the + // Service. Valid values are "IPv4" and "IPv6". This field only applies + // to Services of types ClusterIP, NodePort, and LoadBalancer, and does + // apply to "headless" services. This field will be wiped when updating a + // Service to type ExternalName. + // + // This field may hold a maximum of two entries (dual-stack families, in + // either order). These families must correspond to the values of the + // clusterIPs field, if specified. Both clusterIPs and ipFamilies are + // governed by the ipFamilyPolicy field. + // +listType=atomic + // +optional + IPFamilies []IPFamily `json:"ipFamilies,omitempty"` + + // IPFamilyPolicy represents the dual-stack-ness requested or required by + // this Service, and is gated by the "IPv6DualStack" feature gate. If + // there is no value provided, then this field will be set to SingleStack. + // Services can be "SingleStack" (a single IP family), "PreferDualStack" + // (two IP families on dual-stack configured clusters or a single IP family + // on single-stack clusters), or "RequireDualStack" (two IP families on + // dual-stack configured clusters, otherwise fail). 
The ipFamilies and + // clusterIPs fields depend on the value of this field. This field will be + // wiped when updating a service to type ExternalName. + // +optional + IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty"` + + // allocateLoadBalancerNodePorts defines if NodePorts will be automatically + // allocated for services with type LoadBalancer. Default is "true". It + // may be set to "false" if the cluster load-balancer does not rely on + // NodePorts. If the caller requests specific NodePorts (by specifying a + // value), those requests will be respected, regardless of this field. + // This field may only be set for services with type LoadBalancer and will + // be cleared if the type is changed to any other type. + // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature. + // +featureGate=ServiceLBNodePortControl + // +optional + AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` + + // loadBalancerClass is the class of the load balancer implementation this Service belongs to. + // If specified, the value of this field must be a label-style identifier, with an optional prefix, + // e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + // This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + // balancer implementation is used, today this is typically done through the cloud provider integration, + // but should apply for any default implementation. If set, it is assumed that a load balancer + // implementation is watching for Services with a matching class. Any default load balancer + // implementation (e.g. cloud providers) should ignore Services that set this field. + // This field can only be set when creating or updating a Service to type 'LoadBalancer'. + // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + // +featureGate=LoadBalancerClass + // +optional + LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` + + // InternalTrafficPolicy specifies if the cluster internal traffic + // should be routed to all endpoints or node-local endpoints only. + // "Cluster" routes internal traffic to a Service to all endpoints. + // "Local" routes traffic to node-local endpoints only, traffic is + // dropped if no node-local endpoints are ready. + // The default value is "Cluster". + // +featureGate=ServiceInternalTrafficPolicy + // +optional + InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` +} + +// ServicePort contains information on service's port. +type ServicePort struct { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. + // Optional if only one ServicePort is defined on this service. + // +optional + Name string `json:"name,omitempty"` + + // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + // Default is TCP. + // +default="TCP" + // +optional + Protocol Protocol `json:"protocol,omitempty"` + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). 
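+ // (Illustrative registered values: "http", "https".)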
+ // Non-standard protocols should use prefixed names such as + // mycompany.com/my-custom-protocol. + // +optional + AppProtocol *string `json:"appProtocol,omitempty"` + + // The port that will be exposed by this service. + Port int32 `json:"port"` + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + TargetPort intstr.IntOrString `json:"targetPort,omitempty"` + + // The port on each node on which this service is exposed when type is + // NodePort or LoadBalancer. Usually assigned by the system. If a value is + // specified, in-range, and not in use it will be used, otherwise the + // operation will fail. If not specified, a port will be allocated if this + // Service requires one. If this field is specified when creating a + // Service which does not need it, creation will fail. This field will be + // wiped when updating a Service to no longer need it (e.g. changing type + // from NodePort to ClusterIP). + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + // +optional + NodePort int32 `json:"nodePort,omitempty"` +} + +// +genclient +// +genclient:skipVerbs=deleteCollection +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the behavior of a service. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceSpec `json:"spec,omitempty"` + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceStatus `json:"status,omitempty"` +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // List of services + Items []Service `json:"items"` +} + +// +genclient +// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // List of ServiceAccounts. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + Items []ServiceAccount `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. 
A single address with multiple ports,
+ // some of which are ready and some of which are not (because they come from
+ // different containers) will result in the address being displayed in different
+ // subsets for the different ports. No address will appear in both Addresses and
+ // NotReadyAddresses in the same subset.
+ // Sets of addresses and ports that comprise a service.
+ // +optional
+ Subsets []EndpointSubset `json:"subsets,omitempty"`
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//   {
+//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//   }
+// The resulting set of endpoints can be viewed as:
+//   a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//   b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+ // IP addresses which offer the related ports that are marked as ready. These endpoints
+ // should be considered safe for load balancers and clients to utilize.
+ // +optional
+ Addresses []EndpointAddress `json:"addresses,omitempty"`
+ // IP addresses which offer the related ports but are not currently marked as ready
+ // because they have not yet finished starting, have recently failed a readiness check,
+ // or have recently failed a liveness check.
+ // +optional
+ NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty"`
+ // Port numbers available on the related IP addresses.
+ // +optional
+ Ports []EndpointPort `json:"ports,omitempty"`
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+// +structType=atomic
+type EndpointAddress struct {
+ // The IP of this endpoint.
+ // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
+ // or link-local multicast (224.0.0.0/24).
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, See #4447.
+ IP string `json:"ip"`
+ // The Hostname of this endpoint
+ // +optional
+ Hostname string `json:"hostname,omitempty"`
+ // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+ // +optional
+ NodeName *string `json:"nodeName,omitempty"`
+ // Reference to object providing the endpoint.
+ // +optional
+ TargetRef *ObjectReference `json:"targetRef,omitempty"`
+}
+
+// EndpointPort is a tuple that describes a single port.
+// +structType=atomic
+type EndpointPort struct {
+ // The name of this port. This must match the 'name' field in the
+ // corresponding ServicePort.
+ // Must be a DNS_LABEL.
+ // Optional only if one port is defined.
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // The port number of the endpoint.
+ Port int32 `json:"port"`
+
+ // The IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ // +optional
+ Protocol Protocol `json:"protocol,omitempty"`
+
+ // The application protocol for this port.
+ // This field follows standard Kubernetes label syntax.
+ // Un-prefixed names are reserved for IANA standard service names (as per
+ // RFC-6335 and http://www.iana.org/assignments/service-names).
+ // Non-standard protocols should use prefixed names such as
+ // mycompany.com/my-custom-protocol.
+ // +optional
+ AppProtocol *string `json:"appProtocol,omitempty"`
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
+type ConfigMapNodeConfigSource struct {
+ // Namespace is the metadata.namespace of the referenced ConfigMap.
+ // This field is required in all cases.
+ Namespace string `json:"namespace"`
+
+ // Name is the metadata.name of the referenced ConfigMap.
+ // This field is required in all cases.
+ Name string `json:"name"`
+
+ // UID is the metadata.UID of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ UID types.UID `json:"uid,omitempty"`
+
+ // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ ResourceVersion string `json:"resourceVersion,omitempty"`
+
+ // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+ // This field is required in all cases.
+ KubeletConfigKey string `json:"kubeletConfigKey"`
+}
+
+// Describes a container image
+type ContainerImage struct {
+ // Names by which this image is known.
+ // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+ // +optional
+ Names []string `json:"names"`
+ // The size of the image in bytes.
+ // +optional
+ SizeBytes int64 `json:"sizeBytes,omitempty"`
+}
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// Resource names must not be more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+ // CPU, in cores. (500m = .5 cores)
+ ResourceCPU ResourceName = "cpu"
+ // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ ResourceMemory ResourceName = "memory"
+ // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+ ResourceStorage ResourceName = "storage"
+ // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+ // The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
+ ResourceEphemeralStorage ResourceName = "ephemeral-storage"
+)
+
+const (
+ // Default namespace prefix.
+ ResourceDefaultNamespacePrefix = "kubernetes.io/"
+ // Name prefix for huge page resources (alpha).
+ ResourceHugePagesPrefix = "hugepages-"
+ // Name prefix for storage resource limits
+ ResourceAttachableVolumesPrefix = "attachable-volumes-"
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+type PodLogOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ // +optional
+ Container string `json:"container,omitempty"`
+ // Follow the log stream of the pod. Defaults to false.
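+ // (Illustratively, these options surface as query parameters on the pod log
+ // endpoint, e.g. .../pods/<name>/log?follow=true&tailLines=100; the
+ // parameter values shown are assumptions.)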
+ // +optional
+ Follow bool `json:"follow,omitempty"`
+ // Return previous terminated container logs. Defaults to false.
+ // +optional
+ Previous bool `json:"previous,omitempty"`
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ // +optional
+ SinceSeconds *int64 `json:"sinceSeconds,omitempty"`
+ // An RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ // +optional
+ SinceTime *metav1.Time `json:"sinceTime,omitempty"`
+ // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ // +optional
+ Timestamps bool `json:"timestamps,omitempty"`
+ // If set, the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ // +optional
+ TailLines *int64 `json:"tailLines,omitempty"`
+ // If set, the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ // +optional
+ LimitBytes *int64 `json:"limitBytes,omitempty"`
+
+ // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+ // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
+ // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+ // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, the connection to the real
+ // kubelet is still protected against man-in-the-middle attacks (e.g. an attacker could not intercept
+ // the actual log data coming from the real kubelet).
+ // +optional
+ InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+type PodAttachOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Stdin if true, redirects the standard input stream of the pod for this call.
+ // Defaults to false.
+ // +optional
+ Stdin bool `json:"stdin,omitempty"`
+
+ // Stdout if true indicates that stdout is to be redirected for the attach call.
+ // Defaults to true.
+ // +optional
+ Stdout bool `json:"stdout,omitempty"`
+
+ // Stderr if true indicates that stderr is to be redirected for the attach call.
+ // Defaults to true.
+ // +optional
+ Stderr bool `json:"stderr,omitempty"`
+
+ // TTY if true indicates that a tty will be allocated for the attach call.
+ // This is passed through the container runtime so the tty
+ // is allocated on the worker node by the container runtime.
+ // Defaults to false.
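+ // (For an interactive attach, a client would typically set stdin: true and
+ // tty: true together; this pairing is illustrative.)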
+ // +optional
+ TTY bool `json:"tty,omitempty"`
+
+ // The container in which to execute the command.
+ // Defaults to only container if there is only one container in the pod.
+ // +optional
+ Container string `json:"container,omitempty"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+type PodExecOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Redirect the standard input stream of the pod for this call.
+ // Defaults to false.
+ // +optional
+ Stdin bool `json:"stdin,omitempty"`
+
+ // Redirect the standard output stream of the pod for this call.
+ // Defaults to true.
+ // +optional
+ Stdout bool `json:"stdout,omitempty"`
+
+ // Redirect the standard error stream of the pod for this call.
+ // Defaults to true.
+ // +optional
+ Stderr bool `json:"stderr,omitempty"`
+
+ // TTY if true indicates that a tty will be allocated for the exec call.
+ // Defaults to false.
+ // +optional
+ TTY bool `json:"tty,omitempty"`
+
+ // Container in which to execute the command.
+ // Defaults to only container if there is only one container in the pod.
+ // +optional
+ Container string `json:"container,omitempty"`
+
+ // Command is the remote command to execute. argv array. Not executed within a shell.
+ Command []string `json:"command"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodPortForwardOptions is the query options to a Pod's port forward call
+// when using WebSockets.
+// The `port` query parameter must specify the port or
+// ports (comma separated) to forward over.
+// Port forwarding over SPDY does not use these options. It requires the port
+// to be passed in the `port` header as part of the request.
+type PodPortForwardOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // List of ports to forward
+ // Required when using WebSockets
+ // +optional
+ Ports []int32 `json:"ports,omitempty"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+type PodProxyOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Path is the URL path to use for the current proxy request to pod.
+ // +optional
+ Path string `json:"path,omitempty"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+type NodeProxyOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Path is the URL path to use for the current proxy request to node.
+ // +optional
+ Path string `json:"path,omitempty"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+type ServiceProxyOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Path is the part of URLs that include service endpoints, suffixes,
+ // and parameters to use for the current proxy request to service.
+ // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + Path string `json:"path,omitempty"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. +// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +structType=atomic +type ObjectReference struct { + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty"` + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + Namespace string `json:"namespace,omitempty"` + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty"` + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + UID types.UID `json:"uid,omitempty"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Specific resourceVersion to which this reference is made, if any. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty"` + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like:
+ // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ // the event) or if no container name is specified "spec.containers[2]" (container with
+ // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ // referencing a part of an object.
+ // TODO: this design is not final and this field is subject to change in the future.
+ // +optional
+ FieldPath string `json:"fieldPath,omitempty"`
+}
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+// +structType=atomic
+type LocalObjectReference struct {
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // TODO: Add other useful fields. apiVersion, kind, uid?
+ // +optional
+ Name string `json:"name,omitempty"`
+}
+
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+// +structType=atomic
+type TypedLocalObjectReference struct {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is not specified, the specified Kind must be in the core API group.
+ // For any other third-party types, APIGroup is required.
+ // +optional
+ APIGroup *string `json:"apiGroup"`
+ // Kind is the type of resource being referenced
+ Kind string `json:"kind"`
+ // Name is the name of resource being referenced
+ Name string `json:"name"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SerializedReference is a reference to a serialized object.
+type SerializedReference struct {
+ metav1.TypeMeta `json:",inline"`
+ // The reference to an object in the system.
+ // +optional
+ Reference ObjectReference `json:"reference,omitempty"`
+}
+
+// EventSource contains information for an event.
+type EventSource struct {
+ // Component from which the event is generated.
+ // +optional
+ Component string `json:"component,omitempty"`
+ // Node name on which the event is generated.
+ // +optional
+ Host string `json:"host,omitempty"`
+}
+
+// Valid values for event types (new types could be added in the future)
+const (
+ // Information only and will not cause any problems
+ EventTypeNormal string = "Normal"
+ // These events are to warn that something might go wrong
+ EventTypeWarning string = "Warning"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event is a report of an event somewhere in the cluster. Events
+// have a limited retention time and triggers and messages may evolve
+// with time. Event consumers should not rely on the timing of an event
+// with a given Reason reflecting a consistent underlying trigger, or the
+// continued existence of events with that Reason. Events should be
+// treated as informative, best-effort, supplemental data.
+type Event struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // The object that this event is about.
+ InvolvedObject ObjectReference `json:"involvedObject"`
+
+ // This should be a short, machine understandable string that gives the reason
+ // for the transition into the object's current status.
+ // TODO: provide exact specification for format.
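+ // (Commonly seen reasons include, for example, "Scheduled", "Pulled", or
+ // "BackOff"; these examples are illustrative, not an exhaustive or
+ // guaranteed set.)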
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // A human-readable description of the status of this operation.
+ // TODO: decide on maximum length.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // The component reporting this event. Should be a short machine understandable string.
+ // +optional
+ Source EventSource `json:"source,omitempty"`
+
+ // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+ // +optional
+ FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty"`
+
+ // The time at which the most recent occurrence of this event was recorded.
+ // +optional
+ LastTimestamp metav1.Time `json:"lastTimestamp,omitempty"`
+
+ // The number of times this event has occurred.
+ // +optional
+ Count int32 `json:"count,omitempty"`
+
+ // Type of this event (Normal, Warning), new types could be added in the future
+ // +optional
+ Type string `json:"type,omitempty"`
+
+ // Time when this Event was first observed.
+ // +optional
+ EventTime metav1.MicroTime `json:"eventTime,omitempty"`
+
+ // Data about the Event series this event represents or nil if it's a singleton Event.
+ // +optional
+ Series *EventSeries `json:"series,omitempty"`
+
+ // What action was taken/failed regarding the Regarding object.
+ // +optional
+ Action string `json:"action,omitempty"`
+
+ // Optional secondary object for more complex actions.
+ // +optional
+ Related *ObjectReference `json:"related,omitempty"`
+
+ // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+ // +optional
+ ReportingController string `json:"reportingComponent"`
+
+ // ID of the controller instance, e.g. `kubelet-xyzf`.
+ // +optional
+ ReportingInstance string `json:"reportingInstance"`
+}
+
+// EventSeries contains information on a series of events, i.e. a thing that was/is happening
+// continuously for some time.
+type EventSeries struct {
+ // Number of occurrences in this series up to the last heartbeat time
+ Count int32 `json:"count,omitempty"`
+ // Time of the last occurrence observed
+ LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty"`
+
+ // +k8s:deprecated=state,protobuf=3
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of events.
+type EventList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // List of events
+ Items []Event `json:"items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LimitType is a type of object that is limited
+type LimitType string
+
+const (
+ // Limit that applies to all pods in a namespace
+ LimitTypePod LimitType = "Pod"
+ // Limit that applies to all containers in a namespace
+ LimitTypeContainer LimitType = "Container"
+ // Limit that applies to all persistent volume claims in a namespace
+ LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
+)
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+type LimitRangeItem struct {
+ // Type of resource that this limit applies to.
+ Type LimitType `json:"type"`
+ // Max usage constraints on this kind by resource name.
+ // +optional
+ Max ResourceList `json:"max,omitempty"`
+ // Min usage constraints on this kind by resource name.
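+ // (An illustrative Container-type item, values assumed: min {cpu: 100m,
+ // memory: 64Mi} with max {cpu: "2", memory: 1Gi}.)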
+ // +optional + Min ResourceList `json:"min,omitempty"` + // Default resource requirement limit value by resource name if resource limit is omitted. + // +optional + Default ResourceList `json:"default,omitempty"` + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + DefaultRequest ResourceList `json:"defaultRequest,omitempty"` + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty"` +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced. + Limits []LimitRangeItem `json:"limits"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +type LimitRange struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the limits enforced. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LimitRangeSpec `json:"spec,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of LimitRange objects. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + Items []LimitRange `json:"items"` +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // Local ephemeral storage request, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" + // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" +) + +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" + // Default resource requests prefix + DefaultResourceRequestsPrefix = "requests." +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds >=0 + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where spec.activeDeadlineSeconds is nil + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" + // Match all pod objects that have priority class mentioned + ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" + // Match all pod objects that have cross-namespace pod (anti)affinity mentioned. + // This is a beta feature enabled by the PodAffinityNamespaceSelector feature flag. + ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +type ResourceQuotaSpec struct { + // hard is the set of desired hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + Hard ResourceList `json:"hard,omitempty"` + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope `json:"scopes,omitempty"` + // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + // +optional + ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty"` +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector requirements. +// +structType=atomic +type ScopeSelector struct { + // A list of scope selector requirements by scope of the resources. + // +optional + MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty"` +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +type ScopedResourceSelectorRequirement struct { + // The name of the scope that the selector applies to. 
+ ScopeName ResourceQuotaScope `json:"scopeName"` + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + Operator ScopeSelectorOperator `json:"operator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty"` +} + +// A scope selector operator is the set of operators that can be used in +// a scope selector requirement. +type ScopeSelectorOperator string + +const ( + ScopeSelectorOpIn ScopeSelectorOperator = "In" + ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" + ScopeSelectorOpExists ScopeSelectorOperator = "Exists" + ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" +) + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + Hard ResourceList `json:"hard,omitempty"` + // Used is the current observed total usage of the resource in the namespace. + // +optional + Used ResourceList `json:"used,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired quota. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ResourceQuotaSpec `json:"spec,omitempty"` + + // Status defines the actual enforced quota and its current usage. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ResourceQuotaStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items. +type ResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of ResourceQuota objects. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + Items []ResourceQuota `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Immutable, if set to true, ensures that data stored in the Secret cannot + // be updated (only object metadata can be modified). + // If not set to true, the field can be modified at any time. + // Defaulted to nil. 
+ // +optional + Immutable *bool `json:"immutable,omitempty"` + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + Data map[string][]byte `json:"data,omitempty"` + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only input field for convenience. + // All keys and values are merged into the data field on write, overwriting any existing values. + // The stringData field is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + StringData map[string]string `json:"stringData,omitempty"` + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType `json:"type,omitempty"` +} + +type SecretType string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecretList is a list of Secret. +type SecretList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + Items []Secret `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMap holds configuration data for pods to consume. +type ConfigMap struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Immutable, if set to true, ensures that data stored in the ConfigMap cannot + // be updated (only object metadata can be modified). + // If not set to true, the field can be modified at any time. + // Defaulted to nil. + // +optional + Immutable *bool `json:"immutable,omitempty"` + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. + // +optional + Data map[string]string `json:"data,omitempty"` + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + BinaryData map[string][]byte `json:"binaryData,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of ConfigMaps. 
+	Items []ConfigMap `json:"items"`
+}
+
+// Type and constants for component health validation.
+type ComponentConditionType string
+
+// These are the valid conditions for the component.
+const (
+	ComponentHealthy ComponentConditionType = "Healthy"
+)
+
+// Information about the condition of a component.
+type ComponentCondition struct {
+	// Type of condition for a component.
+	// Valid value: "Healthy"
+	Type ComponentConditionType `json:"type"`
+	// Status of the condition for a component.
+	// Valid values for "Healthy": "True", "False", or "Unknown".
+	Status ConditionStatus `json:"status"`
+	// Message about the condition for a component.
+	// For example, information about a health check.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// Condition error code for a component.
+	// For example, a health check error code.
+	// +optional
+	Error string `json:"error,omitempty"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+// Deprecated: This API is deprecated in v1.19+
+type ComponentStatus struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// List of component conditions observed
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+// Deprecated: This API is deprecated in v1.19+
+type ComponentStatusList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	// List of ComponentStatus objects.
+	Items []ComponentStatus `json:"items"`
+}
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+	// Items is a list of downward API volume file
+	// +optional
+	Items []DownwardAPIVolumeFile `json:"items,omitempty"`
+	// Optional: mode bits used to set permissions on created files by default.
+	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+	// Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty"`
+}
+
+const (
+	DownwardAPIVolumeSourceDefaultMode int32 = 0644
+)
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+type DownwardAPIVolumeFile struct {
+	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded.
The first item of the relative path must not start with '..' + Path string `json:"path"` + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` + // Optional: mode bits used to set permissions on this file, must be an octal value + // between 0000 and 0777 or a decimal value between 0 and 511. + // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + // If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty"` +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty"` +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities `json:"capabilities,omitempty"` + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool `json:"privileged,omitempty"` + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty"` + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsGroup *int64 `json:"runAsGroup,omitempty"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. 
If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
+	// Whether this container has a read-only root filesystem.
+	// Default is false.
+	// +optional
+	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"`
+	// AllowPrivilegeEscalation controls whether a process can gain more
+	// privileges than its parent process. This bool directly controls if
+	// the no_new_privs flag will be set on the container process.
+	// AllowPrivilegeEscalation is always true when the container is:
+	// 1) run as Privileged
+	// 2) has CAP_SYS_ADMIN
+	// +optional
+	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"`
+	// procMount denotes the type of proc mount to use for the containers.
+	// The default is DefaultProcMount which uses the container runtime defaults for
+	// readonly paths and masked paths.
+	// This requires the ProcMountType feature flag to be enabled.
+	// +optional
+	ProcMount *ProcMountType `json:"procMount,omitempty"`
+	// The seccomp options to use by this container. If seccomp options are
+	// provided at both the pod & container level, the container options
+	// override the pod options.
+	// +optional
+	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty"`
+}
+
+type ProcMountType string
+
+const (
+	// DefaultProcMount uses the container runtime defaults for readonly and masked
+	// paths for /proc. Most container runtimes mask certain paths in /proc to avoid
+	// accidental security exposure of special devices or information.
+	DefaultProcMount ProcMountType = "Default"
+
+	// UnmaskedProcMount bypasses the default masking behavior of the container
+	// runtime and ensures the newly created /proc for the container stays intact
+	// with no modifications.
+	UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
+// SELinuxOptions are the labels to be applied to the container
+type SELinuxOptions struct {
+	// User is a SELinux user label that applies to the container.
+	// +optional
+	User string `json:"user,omitempty"`
+	// Role is a SELinux role label that applies to the container.
+	// +optional
+	Role string `json:"role,omitempty"`
+	// Type is a SELinux type label that applies to the container.
+	// +optional
+	Type string `json:"type,omitempty"`
+	// Level is the SELinux level label that applies to the container.
+	// +optional
+	Level string `json:"level,omitempty"`
+}
+
+const (
+	// DefaultSchedulerName defines the name of default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+
+	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+	// corresponding to every RequiredDuringScheduling affinity rule.
+	// When the --hard-pod-affinity-weight scheduler flag is not specified,
+	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+	DefaultHardPodAffinitySymmetricWeight int32 = 1
+)
+
+// Sysctl defines a kernel parameter to be set
+type Sysctl struct {
+	// Name of a property to set
+	Name string `json:"name"`
+	// Value of a property to set
+	Value string `json:"value"`
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
+type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList +} + +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// PortStatus represents the error condition of a service port + +type PortStatus struct { + // Port is the port number of the service port of which status is recorded here + Port int32 `json:"port"` + // Protocol is the protocol of the service port of which status is recorded here + // The supported values are: "TCP", "UDP", "SCTP" + Protocol Protocol `json:"protocol"` + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/LICENSE b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
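For reviewers, here is a minimal usage sketch of the SecurityContext type vendored above. The import path is assumed from the vendored layout this PR adds, and boolPtr is a helper local to the sketch, not part of the package:

package main

import (
	"fmt"

	// Import path assumed from the vendored layout added in this PR.
	v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1"
)

// boolPtr is local to this sketch; the vendored package does not ship one.
func boolPtr(b bool) *bool { return &b }

func main() {
	// A locked-down container-level context. Per the comments in types.go,
	// any field set here wins over the same field in PodSecurityContext.
	sc := v1.SecurityContext{
		Privileged:               boolPtr(false),
		RunAsNonRoot:             boolPtr(true),
		ReadOnlyRootFilesystem:   boolPtr(true),
		AllowPrivilegeEscalation: boolPtr(false), // sets no_new_privs on the process
	}
	fmt.Println("runAsNonRoot:", *sc.RunAsNonRoot, "readOnlyRootFS:", *sc.ReadOnlyRootFilesystem)
}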
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go new file mode 100644 index 00000000000..9f76f915469 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -0,0 +1,299 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + "strconv" + + inf "gopkg.in/inf.v0" +) + +// Scale is used for getting and setting the base-10 scaled value. +// Base-2 scales are omitted for mathematical simplicity. +// See Quantity.ScaledValue for more details. +type Scale int32 + +// infScale adapts a Scale value to an inf.Scale value. +func (s Scale) infScale() inf.Scale { + return inf.Scale(-s) // inf.Scale is upside-down +} + +const ( + Nano Scale = -9 + Micro Scale = -6 + Milli Scale = -3 + Kilo Scale = 3 + Mega Scale = 6 + Giga Scale = 9 + Tera Scale = 12 + Peta Scale = 15 + Exa Scale = 18 +) + +var ( + Zero = int64Amount{} + + // Used by quantity strings - treat as read only + zeroBytes = []byte("0") +) + +// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster +// than operations on inf.Dec for values that can be represented as int64. +// +k8s:openapi-gen=true +type int64Amount struct { + value int64 + scale Scale +} + +// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0. +func (a int64Amount) Sign() int { + switch { + case a.value == 0: + return 0 + case a.value > 0: + return 1 + default: + return -1 + } +} + +// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be +// represented in an int64 OR would result in a loss of precision. This method is intended as +// an optimization to avoid calling AsDec. +func (a int64Amount) AsInt64() (int64, bool) { + if a.scale == 0 { + return a.value, true + } + if a.scale < 0 { + // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior + // to the int64Amount being created. + return 0, false + } + return positiveScaleInt64(a.value, a.scale) +} + +// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale, +// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result +// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger +// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would +// return 1, because 0.000001 is rounded up to 1. +func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) { + if a.scale < scale { + result, _ = negativeScaleInt64(a.value, scale-a.scale) + return result, true + } + return positiveScaleInt64(a.value, a.scale-scale) +} + +// AsDec returns an inf.Dec representation of this value. 
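The "upside-down" scale convention noted on infScale above is easy to trip over, so here is a standalone sketch against gopkg.in/inf.v0 showing the inversion (no vendored code required):

package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

func main() {
	// inf.Dec represents unscaled * 10^(-scale), so a positive inf.Scale moves
	// the decimal point to the left. resource.Scale uses the opposite sign
	// (Milli = -3), which is why infScale() negates the value.
	d := inf.NewDec(1500, 3) // 1500 * 10^-3
	fmt.Println(d)           // prints "1.500"
}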
+func (a int64Amount) AsDec() *inf.Dec { + var base inf.Dec + base.SetUnscaled(a.value) + base.SetScale(inf.Scale(-a.scale)) + return &base +} + +// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b. +func (a int64Amount) Cmp(b int64Amount) int { + switch { + case a.scale == b.scale: + // compare only the unscaled portion + case a.scale > b.scale: + result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == a.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return -1 + default: + return 1 + } + } + b.value = result + default: + result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == b.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return 1 + default: + return -1 + } + } + a.value = result + } + + switch { + case a.value == b.value: + return 0 + case a.value < b.value: + return -1 + default: + return 1 + } +} + +// Add adds two int64Amounts together, matching scales. It will return false and not mutate +// a if overflow or underflow would result. +func (a *int64Amount) Add(b int64Amount) bool { + switch { + case b.value == 0: + return true + case a.value == 0: + a.value = b.value + a.scale = b.scale + return true + case a.scale == b.scale: + c, ok := int64Add(a.value, b.value) + if !ok { + return false + } + a.value = c + case a.scale > b.scale: + c, ok := positiveScaleInt64(a.value, a.scale-b.scale) + if !ok { + return false + } + c, ok = int64Add(c, b.value) + if !ok { + return false + } + a.scale = b.scale + a.value = c + default: + c, ok := positiveScaleInt64(b.value, b.scale-a.scale) + if !ok { + return false + } + c, ok = int64Add(a.value, c) + if !ok { + return false + } + a.value = c + } + return true +} + +// Sub removes the value of b from the current amount, or returns false if underflow would result. +func (a *int64Amount) Sub(b int64Amount) bool { + return a.Add(int64Amount{value: -b.value, scale: b.scale}) +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { + if a.scale >= scale { + return a, true + } + result, exact := negativeScaleInt64(a.value, scale-a.scale) + return int64Amount{value: result, scale: scale}, exact +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. 
+func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.value + exponent = int32(a.scale) + + amount, times := removeInt64Factors(mantissa, 10) + exponent += int32(times) + + // make sure exponent is a multiple of 3 + var ok bool + switch exponent % 3 { + case 1, -2: + amount, ok = int64MultiplyScale10(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent-- + case 2, -1: + amount, ok = int64MultiplyScale100(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent -= 2 + } + return strconv.AppendInt(out, amount, 10), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + value, ok := a.AsScaledInt64(0) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out) + } + amount, exponent := removeInt64Factors(value, 1024) + return strconv.AppendInt(out, amount, 10), exponent +} + +// infDecAmount implements common operations over an inf.Dec that are specific to the quantity +// representation. +type infDecAmount struct { + *inf.Dec +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, scale.infScale(), inf.RoundUp) + return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0 +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.Dec.UnscaledBig() + exponent = int32(-a.Dec.Scale()) + amount := big.NewInt(0).Set(mantissa) + // move all factors of 10 into the exponent for easy reasoning + amount, times := removeBigIntFactors(amount, bigTen) + exponent += times + + // make sure exponent is a multiple of 3 + for exponent%3 != 0 { + amount.Mul(amount, bigTen) + exponent-- + } + + return append(out, amount.String()...), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, 0, inf.RoundUp) + amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024) + return append(out, amount.String()...), exponent +} diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/math.go new file mode 100644 index 00000000000..9d03f5c05c4 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/math.go @@ -0,0 +1,304 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + + inf "gopkg.in/inf.v0" +) + +const ( + // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64. + // It is also the maximum decimal digits that can be represented with an int64. + maxInt64Factors = 18 +) + +var ( + // Commonly needed big.Int values-- treat as read only! + bigTen = big.NewInt(10) + bigZero = big.NewInt(0) + bigOne = big.NewInt(1) + big1024 = big.NewInt(1024) + + // Commonly needed inf.Dec values-- treat as read only! + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) + + // Largest (in magnitude) number allowed. + maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 +) + +const mostNegative = -(mostPositive + 1) +const mostPositive = 1<<63 - 1 + +// int64Add returns a+b, or false if that would overflow int64. +func int64Add(a, b int64) (int64, bool) { + c := a + b + switch { + case a > 0 && b > 0: + if c < 0 { + return 0, false + } + case a < 0 && b < 0: + if c > 0 { + return 0, false + } + if a == mostNegative && b == mostNegative { + return 0, false + } + } + return c, true +} + +// int64Multiply returns a*b, or false if that would overflow or underflow int64. +func int64Multiply(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == mostNegative || b == mostNegative { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64. +// Use when b is known to be greater than one. +func int64MultiplyScale(a int64, b int64) (int64, bool) { + if a == 0 || a == 1 { + return a * b, true + } + if a == mostNegative && b != 1 { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale10(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 10, true + } + if a == mostNegative { + return 0, false + } + c := a * 10 + return c, c/10 == a +} + +// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale100(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 100, true + } + if a == mostNegative { + return 0, false + } + c := a * 100 + return c, c/100 == a +} + +// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication. 
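The int64Multiply* helpers above all rely on the same overflow check: multiply, then verify the division round-trips. A self-contained sketch of that idiom follows (mulCheck is an illustrative name, not part of the vendored package):

package main

import "fmt"

// mulCheck mirrors the idiom used by int64Multiply: after c = a*b, overflow
// occurred iff c/b != a, with the zero/one/mostNegative special cases screened
// out first, since -2^63 / -1 itself overflows.
func mulCheck(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	const mostNegative = -1 << 63
	if a == mostNegative || b == mostNegative {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}

func main() {
	fmt.Println(mulCheck(1<<40, 1<<30)) // 2^70 overflows int64: prints "0 false"
	fmt.Println(mulCheck(123, 456))     // prints "56088 true"
}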
+func int64MultiplyScale1000(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 1000, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 1000
+	return c, c/1000 == a
+}
+
+// positiveScaleInt64 multiplies base by 10^scale, returning false if the
+// value overflows. Passing a negative scale is undefined.
+func positiveScaleInt64(base int64, scale Scale) (int64, bool) {
+	switch scale {
+	case 0:
+		return base, true
+	case 1:
+		return int64MultiplyScale10(base)
+	case 2:
+		return int64MultiplyScale100(base)
+	case 3:
+		return int64MultiplyScale1000(base)
+	case 6:
+		return int64MultiplyScale(base, 1000000)
+	case 9:
+		return int64MultiplyScale(base, 1000000000)
+	default:
+		value := base
+		var ok bool
+		for i := Scale(0); i < scale; i++ {
+			if value, ok = int64MultiplyScale(value, 10); !ok {
+				return 0, false
+			}
+		}
+		return value, true
+	}
+}
+
+// negativeScaleInt64 reduces base by the provided scale, rounding up, until the
+// value is zero or the scale is reached. Passing a negative scale is undefined.
+// The value returned, if not exact, is rounded away from zero.
+func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
+	if scale == 0 {
+		return base, true
+	}
+
+	value := base
+	var fraction bool
+	for i := Scale(0); i < scale; i++ {
+		if !fraction && value%10 != 0 {
+			fraction = true
+		}
+		value /= 10
+		if value == 0 {
+			if fraction {
+				if base > 0 {
+					return 1, false
+				}
+				return -1, false
+			}
+			return 0, true
+		}
+	}
+	if fraction {
+		if base > 0 {
+			value++
+		} else {
+			value--
+		}
+	}
+	return value, !fraction
+}
+
+func pow10Int64(b int64) int64 {
+	switch b {
+	case 0:
+		return 1
+	case 1:
+		return 10
+	case 2:
+		return 100
+	case 3:
+		return 1000
+	case 4:
+		return 10000
+	case 5:
+		return 100000
+	case 6:
+		return 1000000
+	case 7:
+		return 10000000
+	case 8:
+		return 100000000
+	case 9:
+		return 1000000000
+	case 10:
+		return 10000000000
+	case 11:
+		return 100000000000
+	case 12:
+		return 1000000000000
+	case 13:
+		return 10000000000000
+	case 14:
+		return 100000000000000
+	case 15:
+		return 1000000000000000
+	case 16:
+		return 10000000000000000
+	case 17:
+		return 100000000000000000
+	case 18:
+		return 1000000000000000000
+	default:
+		return 0
+	}
+}
+
+// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder, or
+// false if no such division is possible. Dividing by negative scales is undefined.
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
+	if scale == 0 {
+		return base, 0, true
+	}
+	// the max scale representable in base 10 in an int64 is 18 decimal places
+	if scale >= 18 {
+		return 0, base, false
+	}
+	divisor := pow10Int64(int64(scale))
+	return base / divisor, base % divisor, true
+}
+
+// removeInt64Factors divides in a loop; the return values have the property that
+// value == result * base ^ times
+func removeInt64Factors(value int64, base int64) (result int64, times int32) {
+	times = 0
+	result = value
+	negative := result < 0
+	if negative {
+		result = -result
+	}
+	switch base {
+	// allow the compiler to optimize the common cases
+	case 10:
+		for result >= 10 && result%10 == 0 {
+			times++
+			result /= 10
+		}
+	// allow the compiler to optimize the common cases
+	case 1024:
+		for result >= 1024 && result%1024 == 0 {
+			times++
+			result /= 1024
+		}
+	default:
+		for result >= base && result%base == 0 {
+			times++
+			result /= base
+		}
+	}
+	if negative {
+		result = -result
+	}
+	return result, times
+}
+
+// removeBigIntFactors divides in a loop; the return values have the property that
+// d == result * factor ^ times
+// d may be modified in place.
+// If d == 0, then the return values will be (0, 0)
+func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
+	q := big.NewInt(0)
+	m := big.NewInt(0)
+	for d.Cmp(bigZero) != 0 {
+		q.DivMod(d, factor, m)
+		if m.Cmp(bigZero) != 0 {
+			break
+		}
+		times++
+		d, q = q, d
+	}
+	return d, times
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
new file mode 100644
index 00000000000..965d2ccaf14
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -0,0 +1,693 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"math/big"
+	"strconv"
+	"strings"
+
+	inf "gopkg.in/inf.v0"
+)
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and AsInt64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. +// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Quantity struct { + // i is the quantity in int64 scaled form, if d.Dec == nil + i int64Amount + // d is the quantity in inf.Dec form if d.Dec != nil + d infDecAmount + // s is the generated value of this quantity to avoid recalculation + s string + + // Change Format at will. See the comment for Canonicalize for + // more details. + Format +} + +// CanonicalValue allows a quantity amount to be converted to a string. +type CanonicalValue interface { + // AsCanonicalBytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-10. Callers may + // pass a byte slice to the method to avoid allocations. + AsCanonicalBytes(out []byte) ([]byte, int32) + // AsCanonicalBase1024Bytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-1024. Callers + // may pass a byte slice to the method to avoid allocations. + AsCanonicalBase1024Bytes(out []byte) ([]byte, int32) +} + +// Format lists the three possible formattings of a quantity. +type Format string + +const ( + DecimalExponent = Format("DecimalExponent") // e.g., 12e6 + BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20) + DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6) +) + +// MustParse turns the given string into a quantity or panics; for tests +// or other cases where you know the string is valid. +func MustParse(str string) Quantity { + q, err := ParseQuantity(str) + if err != nil { + panic(fmt.Errorf("cannot parse '%v': %v", str, err)) + } + return q +} + +const ( + // splitREString is used to separate a number from its suffix; as such, + // this is overly permissive, but that's OK-- it will be checked later. + splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" +) + +var ( + // Errors that could happen while parsing a string. 
+ //nolint:revive + ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'") + ErrNumeric = errors.New("unable to parse numeric part of quantity") + ErrSuffix = errors.New("unable to parse quantity's suffix") +) + +// parseQuantityString is a fast scanner for quantity values. +func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) { + positive = true + pos := 0 + end := len(str) + + // handle leading sign + if pos < end { + switch str[0] { + case '-': + positive = false + pos++ + case '+': + pos++ + } + } + + // strip leading zeros +Zeroes: + for i := pos; ; i++ { + if i >= end { + num = "0" + value = num + return + } + switch str[i] { + case '0': + pos++ + default: + break Zeroes + } + } + + // extract the numerator +Num: + for i := pos; ; i++ { + if i >= end { + num = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + num = str[pos:i] + pos = i + break Num + } + } + + // if we stripped all numerator positions, always return 0 + if len(num) == 0 { + num = "0" + } + + // handle a denominator + if pos < end && str[pos] == '.' { + pos++ + Denom: + for i := pos; ; i++ { + if i >= end { + denom = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + denom = str[pos:i] + pos = i + break Denom + } + } + // TODO: we currently allow 1.G, but we may not want to in the future. + // if len(denom) == 0 { + // err = ErrFormatWrong + // return + // } + } + value = str[0:pos] + + // grab the elements of the suffix + suffixStart := pos + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") { + pos = i + break + } + } + if pos < end { + switch str[pos] { + case '-', '+': + pos++ + } + } +Suffix: + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + break Suffix + } + } + // we encountered a non decimal in the Suffix loop, but the last character + // was not a valid exponent + err = ErrFormatWrong + // nolint:nakedret + return +} + +// ParseQuantity turns str into a Quantity, or returns an error. 
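As a usage sketch of the parser defined next, grounded in the canonicalization examples from the Quantity doc comment above: the import path is the vendored one this PR adds, and String() is defined later in this same file.

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q, err := resource.ParseQuantity("1.5Gi")
	if err != nil {
		panic(err)
	}
	// Canonical form keeps the suffix family but drops the fraction:
	fmt.Println(q.String()) // "1536Mi"

	// DecimalSI input: 1.5 cores canonicalizes to millicores.
	fmt.Println(resource.MustParse("1.5").String()) // "1500m"
}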
+func ParseQuantity(str string) (Quantity, error) { + if len(str) == 0 { + return Quantity{}, ErrFormatWrong + } + if str == "0" { + return Quantity{Format: DecimalSI, s: str}, nil + } + + positive, value, num, denom, suf, err := parseQuantityString(str) + if err != nil { + return Quantity{}, err + } + + base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf)) + if !ok { + return Quantity{}, ErrSuffix + } + + precision := int32(0) + scale := int32(0) + mantissa := int64(1) + switch format { + case DecimalExponent, DecimalSI: + scale = exponent + precision = maxInt64Factors - int32(len(num)+len(denom)) + case BinarySI: + scale = 0 + switch { + case exponent >= 0 && len(denom) == 0: + // only handle positive binary numbers with the fast path + mantissa = int64(int64(mantissa) << uint64(exponent)) + // 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision + precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1 + default: + precision = -1 + } + } + + if precision >= 0 { + // if we have a denominator, shift the entire value to the left by the number of places in the + // denominator + scale -= int32(len(denom)) + if scale >= int32(Nano) { + shifted := num + denom + + var value int64 + value, err := strconv.ParseInt(shifted, 10, 64) + if err != nil { + return Quantity{}, ErrNumeric + } + if result, ok := int64Multiply(value, int64(mantissa)); ok { + if !positive { + result = -result + } + // if the number is in canonical form, reuse the string + switch format { + case BinarySI: + if exponent%10 == 0 && (value&0x07 != 0) { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + default: + if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + } + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil + } + } + } + + amount := new(inf.Dec) + if _, ok := amount.SetString(value); !ok { + return Quantity{}, ErrNumeric + } + + // So that no one but us has to think about suffixes, remove it. + if base == 10 { + amount.SetScale(amount.Scale() + Scale(exponent).infScale()) + } else if base == 2 { + // numericSuffix = 2 ** exponent + numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent)) + ub := amount.UnscaledBig() + amount.SetUnscaledBig(ub.Mul(ub, numericSuffix)) + } + + // Cap at min/max bounds. + sign := amount.Sign() + if sign == -1 { + amount.Neg(amount) + } + + // This rounds non-zero values up to the minimum representable value, under the theory that + // if you want some resources, you should get some resources, even if you asked for way too small + // of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have + // the side effect of rounding values < .5n to zero. + if v, ok := amount.Unscaled(); v != int64(0) || !ok { + amount.Round(amount, Nano.infScale(), inf.RoundUp) + } + + // The max is just a simple cap. + // TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster + if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 { + amount.Set(maxAllowed.Dec) + } + + if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 { + // This avoids rounding and hopefully confusion, too. 
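+		// (Illustrative note, an editorial addition: a parsed value such as
+		// "0.0001Ki" evaluates to 0.1024, which no binary suffix can express
+		// as a whole number, so the format is demoted instead of the value
+		// being rounded.)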
+ format = DecimalSI + } + if sign == -1 { + amount.Neg(amount) + } + + return Quantity{d: infDecAmount{amount}, Format: format}, nil +} + +// DeepCopy returns a deep-copy of the Quantity value. Note that the method +// receiver is a value, so we can mutate it in-place and return it. +func (q Quantity) DeepCopy() Quantity { + if q.d.Dec != nil { + tmp := &inf.Dec{} + q.d.Dec = tmp.Set(q.d.Dec) + } + return q +} + +// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). +// +// Note about BinarySI: +// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between +// -1 and +1, it will be emitted as if q.Format were DecimalSI. +// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be +// rounded up. (1.1i becomes 2i.) +func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { + if q.IsZero() { + return zeroBytes, nil + } + + var rounded CanonicalValue + format := q.Format + switch format { + case DecimalExponent, DecimalSI: + case BinarySI: + if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 { + // This avoids rounding and hopefully confusion, too. + format = DecimalSI + } else { + var exact bool + if rounded, exact = q.AsScale(0); !exact { + // Don't lose precision-- show as DecimalSI + format = DecimalSI + } + } + default: + format = DecimalExponent + } + + // TODO: If BinarySI formatting is requested but would cause rounding, upgrade to + // one of the other formats. + switch format { + case DecimalExponent, DecimalSI: + number, exponent := q.AsCanonicalBytes(out) + suffix, _ := quantitySuffixer.constructBytes(10, exponent, format) + return number, suffix + default: + // format must be BinarySI + number, exponent := rounded.AsCanonicalBase1024Bytes(out) + suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format) + return number, suffix + } +} + +// AsApproximateFloat64 returns a float64 representation of the quantity which may +// lose precision. If the value of the quantity is outside the range of a float64 +// +Inf/-Inf will be returned. +func (q *Quantity) AsApproximateFloat64() float64 { + var base float64 + var exponent int + if q.d.Dec != nil { + base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64() + exponent = int(-q.d.Dec.Scale()) + } else { + base = float64(q.i.value) + exponent = int(q.i.scale) + } + if exponent == 0 { + return base + } + + // multiply by the appropriate exponential scale + switch q.Format { + case DecimalExponent, DecimalSI: + return base * math.Pow10(exponent) + default: + // fast path for exponents that can fit in 64 bits + if exponent > 0 && exponent < 7 { + return base * float64(int64(1)<<(exponent*10)) + } + return base * math.Pow(2, float64(exponent*10)) + } +} + +// AsInt64 returns a representation of the current value as an int64 if a fast conversion +// is possible. If false is returned, callers must use the inf.Dec form of this quantity. +func (q *Quantity) AsInt64() (int64, bool) { + if q.d.Dec != nil { + return 0, false + } + return q.i.AsInt64() +} + +// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself. +func (q *Quantity) ToDec() *Quantity { + if q.d.Dec == nil { + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + } + return q +} + +// AsDec returns the quantity as represented by a scaled inf.Dec. 
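+// Note that, like ToDec, this promotes the quantity in place: the int64 fast
+// path is abandoned in favor of the inf.Dec representation from then on.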
+func (q *Quantity) AsDec() *inf.Dec { + if q.d.Dec != nil { + return q.d.Dec + } + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + return q.d.Dec +} + +// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa +// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra +// allocation. +func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + if q.d.Dec != nil { + return q.d.AsCanonicalBytes(out) + } + return q.i.AsCanonicalBytes(out) +} + +// IsZero returns true if the quantity is equal to zero. +func (q *Quantity) IsZero() bool { + if q.d.Dec != nil { + return q.d.Dec.Sign() == 0 + } + return q.i.value == 0 +} + +// AsScale returns the current value, rounded up to the provided scale, and returns +// false if the scale resulted in a loss of precision. +func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) { + if q.d.Dec != nil { + return q.d.AsScale(scale) + } + return q.i.AsScale(scale) +} + +// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) Cmp(y Quantity) int { + if q.d.Dec == nil && y.d.Dec == nil { + return q.i.Cmp(y.i) + } + return q.AsDec().Cmp(y.AsDec()) +} + +// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) CmpInt64(y int64) int { + if q.d.Dec != nil { + return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0))) + } + return q.i.Cmp(int64Amount{value: y}) +} + +// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation +// of most Quantity values. +const int64QuantityExpectedBytes = 18 + +// String formats the Quantity as a string, caching the result if not calculated. +// String is an expensive operation and caching this result significantly reduces the cost of +// normal parse / marshal operations on Quantity. +func (q *Quantity) String() string { + if q == nil { + return "" + } + if len(q.s) == 0 { + result := make([]byte, 0, int64QuantityExpectedBytes) + number, suffix := q.CanonicalizeBytes(result) + number = append(number, suffix...) + q.s = string(number) + } + return q.s +} + +// MarshalJSON implements the json.Marshaller interface. +func (q Quantity) MarshalJSON() ([]byte, error) { + if len(q.s) > 0 { + out := make([]byte, len(q.s)+2) + out[0], out[len(out)-1] = '"', '"' + copy(out[1:], q.s) + return out, nil + } + result := make([]byte, int64QuantityExpectedBytes) + result[0] = '"' + number, suffix := q.CanonicalizeBytes(result[1:1]) + // if the same slice was returned to us that we passed in, avoid another allocation by copying number into + // the source slice and returning that + if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes { + number = append(number, suffix...) + number = append(number, '"') + return result[:1+len(number)], nil + } + // if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use + // append + result = result[:1] + result = append(result, number...) // nolint: makezero + result = append(result, suffix...) // nolint: makezero + result = append(result, '"') // nolint: makezero + return result, nil +} + +// ToUnstructured implements the value.UnstructuredConverter interface. +func (q Quantity) ToUnstructured() interface{} { + return q.String() +} + +// UnmarshalJSON implements the json.Unmarshaller interface. 
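+//
+// A minimal round-trip sketch (an editorial addition; the literals are
+// assumed example inputs):
+//
+//	var q Quantity
+//	_ = json.Unmarshal([]byte(`"500m"`), &q) // quoted quantities are accepted
+//	_ = json.Unmarshal([]byte(`null`), &q)   // null clears the numeric amount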
+// TODO: Remove support for leading/trailing whitespace +func (q *Quantity) UnmarshalJSON(value []byte) error { + l := len(value) + if l == 4 && bytes.Equal(value, []byte("null")) { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + if l >= 2 && value[0] == '"' && value[l-1] == '"' { + value = value[1 : l-1] + } + + parsed, err := ParseQuantity(strings.TrimSpace(string(value))) + if err != nil { + return err + } + + // This copy is safe because parsed will not be referred to again. + *q = parsed + return nil +} + +// NewDecimalQuantity returns a new Quantity representing the given +// value in the given format. +func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { + return &Quantity{ + d: infDecAmount{&b}, + Format: format, + } +} + +// NewQuantity returns a new Quantity representing the given +// value in the given format. +func NewQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value}, + Format: format, + } +} + +// NewMilliQuantity returns a new Quantity representing the given +// value * 1/1000 in the given format. Note that BinarySI formatting +// will round fractional values, and will be changed to DecimalSI for +// values x where (-1 < x < 1) && (x != 0). +func NewMilliQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: -3}, + Format: format, + } +} + +// NewScaledQuantity returns a new Quantity representing the given +// value * 10^scale in DecimalSI format. +func NewScaledQuantity(value int64, scale Scale) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: scale}, + Format: DecimalSI, + } +} + +// Value returns the unscaled value of q rounded up to the nearest integer away from 0. +func (q *Quantity) Value() int64 { + return q.ScaledValue(0) +} + +// MilliValue returns the value of ceil(q * 1000); this could overflow an int64; +// if that's a concern, call Value() first to verify the number is small enough. +func (q *Quantity) MilliValue() int64 { + return q.ScaledValue(Milli) +} + +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. +// To detect overflow, call Value() first and verify the expected magnitude. +func (q *Quantity) ScaledValue(scale Scale) int64 { + if q.d.Dec == nil { + i, _ := q.i.AsScaledInt64(scale) + return i + } + dec := q.d.Dec + return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale())) +} + +// Set sets q's value to be value. +func (q *Quantity) Set(value int64) { + q.SetScaled(value, 0) +} + +// SetMilli sets q's value to be value * 1/1000. +func (q *Quantity) SetMilli(value int64) { + q.SetScaled(value, Milli) +} + +// SetScaled sets q's value to be value * 10^scale +func (q *Quantity) SetScaled(value int64, scale Scale) { + q.s = "" + q.d.Dec = nil + q.i = int64Amount{value: value, scale: scale} +} diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go new file mode 100644 index 00000000000..55e177b0e9b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math"
+	"math/big"
+	"sync"
+)
+
+var (
+	// A sync pool to reduce allocation.
+	intPool  sync.Pool
+	maxInt64 = big.NewInt(math.MaxInt64)
+)
+
+func init() {
+	intPool.New = func() interface{} {
+		return &big.Int{}
+	}
+}
+
+// scaledValue scales the given unscaled value from scale to newScale and returns
+// an int64. It ALWAYS rounds up the result when scaling down. The final result might
+// overflow.
+//
+// scale, newScale represent the scale of the unscaled decimal.
+// The mathematical value of the decimal is unscaled * 10**(-scale).
+func scaledValue(unscaled *big.Int, scale, newScale int) int64 {
+	dif := scale - newScale
+	if dif == 0 {
+		return unscaled.Int64()
+	}
+
+	// Handle scale up
+	// This is an easy case, we do not need to care about rounding and overflow.
+	// If any intermediate operation causes overflow, the result will overflow.
+	if dif < 0 {
+		return unscaled.Int64() * int64(math.Pow10(-dif))
+	}
+
+	// Handle scale down
+	// We have to be careful about the intermediate operations.
+
+	// fast path when unscaled < maxInt64 and exp(10,dif) < maxInt64
+	const log10MaxInt64 = 19
+	if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 {
+		divide := int64(math.Pow10(dif))
+		result := unscaled.Int64() / divide
+		mod := unscaled.Int64() % divide
+		if mod != 0 {
+			return result + 1
+		}
+		return result
+	}
+
+	// We should only convert back to int64 when getting the result.
+	divisor := intPool.Get().(*big.Int)
+	exp := intPool.Get().(*big.Int)
+	result := intPool.Get().(*big.Int)
+	defer func() {
+		intPool.Put(divisor)
+		intPool.Put(exp)
+		intPool.Put(result)
+	}()
+
+	// divisor = 10^(dif)
+	// TODO: create a lookup table if exp costs too much.
+	divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil)
+	// reuse exp
+	remainder := exp
+
+	// result = unscaled / divisor
+	// remainder = unscaled % divisor
+	result.DivMod(unscaled, divisor, remainder)
+	if remainder.Sign() != 0 {
+		return result.Int64() + 1
+	}
+
+	return result.Int64()
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go
new file mode 100644
index 00000000000..6ec527f9c00
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"strconv"
+)
+
+type suffix string
+
+// suffixer can interpret and construct suffixes.
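+// For example (behavior follows from the tables and fast path below):
+//
+//	interpret("Ki") // base 2,  exponent 10, BinarySI
+//	interpret("M")  // base 10, exponent 6,  DecimalSI
+//	interpret("e3") // base 10, exponent 3,  DecimalExponent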
+type suffixer interface { + interpret(suffix) (base, exponent int32, fmt Format, ok bool) + construct(base, exponent int32, fmt Format) (s suffix, ok bool) + constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool) +} + +// quantitySuffixer handles suffixes for all three formats that quantity +// can handle. +var quantitySuffixer = newSuffixer() + +type bePair struct { + base, exponent int32 +} + +type listSuffixer struct { + suffixToBE map[suffix]bePair + beToSuffix map[bePair]suffix + beToSuffixBytes map[bePair][]byte +} + +func (ls *listSuffixer) addSuffix(s suffix, pair bePair) { + if ls.suffixToBE == nil { + ls.suffixToBE = map[suffix]bePair{} + } + if ls.beToSuffix == nil { + ls.beToSuffix = map[bePair]suffix{} + } + if ls.beToSuffixBytes == nil { + ls.beToSuffixBytes = map[bePair][]byte{} + } + ls.suffixToBE[s] = pair + ls.beToSuffix[pair] = s + ls.beToSuffixBytes[pair] = []byte(s) +} + +func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) { + pair, ok := ls.suffixToBE[s] + if !ok { + return 0, 0, false + } + return pair.base, pair.exponent, true +} + +func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) { + s, ok = ls.beToSuffix[bePair{base, exponent}] + return +} + +func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) { + s, ok = ls.beToSuffixBytes[bePair{base, exponent}] + return +} + +type suffixHandler struct { + decSuffixes listSuffixer + binSuffixes listSuffixer +} + +type fastLookup struct { + *suffixHandler +} + +func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) { + switch s { + case "": + return 10, 0, DecimalSI, true + case "n": + return 10, -9, DecimalSI, true + case "u": + return 10, -6, DecimalSI, true + case "m": + return 10, -3, DecimalSI, true + case "k": + return 10, 3, DecimalSI, true + case "M": + return 10, 6, DecimalSI, true + case "G": + return 10, 9, DecimalSI, true + } + return l.suffixHandler.interpret(s) +} + +func newSuffixer() suffixer { + sh := &suffixHandler{} + + // IMPORTANT: if you change this section you must change fastLookup + + sh.binSuffixes.addSuffix("Ki", bePair{2, 10}) + sh.binSuffixes.addSuffix("Mi", bePair{2, 20}) + sh.binSuffixes.addSuffix("Gi", bePair{2, 30}) + sh.binSuffixes.addSuffix("Ti", bePair{2, 40}) + sh.binSuffixes.addSuffix("Pi", bePair{2, 50}) + sh.binSuffixes.addSuffix("Ei", bePair{2, 60}) + // Don't emit an error when trying to produce + // a suffix for 2^0. 
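+	// (Concretely: bePair{2, 0} maps to the empty suffix in the decimal
+	// table, so asking to construct a suffix for 2^0 yields "" rather than
+	// an error.)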
+ sh.decSuffixes.addSuffix("", bePair{2, 0}) + + sh.decSuffixes.addSuffix("n", bePair{10, -9}) + sh.decSuffixes.addSuffix("u", bePair{10, -6}) + sh.decSuffixes.addSuffix("m", bePair{10, -3}) + sh.decSuffixes.addSuffix("", bePair{10, 0}) + sh.decSuffixes.addSuffix("k", bePair{10, 3}) + sh.decSuffixes.addSuffix("M", bePair{10, 6}) + sh.decSuffixes.addSuffix("G", bePair{10, 9}) + sh.decSuffixes.addSuffix("T", bePair{10, 12}) + sh.decSuffixes.addSuffix("P", bePair{10, 15}) + sh.decSuffixes.addSuffix("E", bePair{10, 18}) + + return fastLookup{sh} +} + +func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) { + switch fmt { + case DecimalSI: + return sh.decSuffixes.construct(base, exponent) + case BinarySI: + return sh.binSuffixes.construct(base, exponent) + case DecimalExponent: + if base != 10 { + return "", false + } + if exponent == 0 { + return "", true + } + return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true + } + return "", false +} + +func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) { + switch format { + case DecimalSI: + return sh.decSuffixes.constructBytes(base, exponent) + case BinarySI: + return sh.binSuffixes.constructBytes(base, exponent) + case DecimalExponent: + if base != 10 { + return nil, false + } + if exponent == 0 { + return nil, true + } + result := make([]byte, 8) + result[0] = 'e' + number := strconv.AppendInt(result[1:1], int64(exponent), 10) + if &result[1] == &number[0] { + return result[:1+len(number)], true + } + result = append(result[:1], number...) + return result, true + } + return nil, false +} + +func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) { + // Try lookup tables first + if b, e, ok := sh.decSuffixes.lookup(suffix); ok { + return b, e, DecimalSI, true + } + if b, e, ok := sh.binSuffixes.lookup(suffix); ok { + return b, e, BinarySI, true + } + + if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') { + parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64) + if err != nil { + return 0, 0, DecimalExponent, false + } + return 10, int32(parsed), DecimalExponent, true + } + + return 0, 0, DecimalExponent, false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go new file mode 100644 index 00000000000..f95ba6eacf8 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -0,0 +1,55 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +type Duration struct { + time.Duration +} + +// UnmarshalJSON implements the json.Unmarshaller interface. 
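+//
+// A minimal usage sketch (an editorial addition; the literal is an assumed
+// example input):
+//
+//	var d Duration
+//	err := json.Unmarshal([]byte(`"1h30m"`), &d)
+//	// on success, d.Duration == 90*time.Minute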
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pd, err := time.ParseDuration(str)
+	if err != nil {
+		return err
+	}
+	d.Duration = pd
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.Duration.String())
+}
+
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (d Duration) ToUnstructured() interface{} {
+	return d.Duration.String()
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
new file mode 100644
index 00000000000..a6dbd7cbd6b
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+)
+
+const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
+
+// MicroTime is a version of Time with microsecond level precision.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type MicroTime struct {
+	time.Time
+}
+
+// DeepCopyInto creates a deep-copy of the MicroTime value. The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *MicroTime) DeepCopyInto(out *MicroTime) {
+	*out = *t
+}
+
+// NewMicroTime returns a wrapped instance of the provided time
+func NewMicroTime(time time.Time) MicroTime {
+	return MicroTime{time}
+}
+
+// DateMicro returns the MicroTime corresponding to the supplied parameters
+// by wrapping time.Date.
+func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime {
+	return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// NowMicro returns the current local time.
+func NowMicro() MicroTime {
+	return MicroTime{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *MicroTime) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *MicroTime) Before(u *MicroTime) bool {
+	if t != nil && u != nil {
+		return t.Time.Before(u.Time)
+	}
+	return false
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *MicroTime) Equal(u *MicroTime) bool {
+	if t == nil && u == nil {
+		return true
+	}
+	if t != nil && u != nil {
+		return t.Time.Equal(u.Time)
+	}
+	return false
+}
+
+// BeforeTime reports whether the time instant t is before second-level precision u.
+func (t *MicroTime) BeforeTime(u *Time) bool {
+	if t != nil && u != nil {
+		return t.Time.Before(u.Time)
+	}
+	return false
+}
+
+// EqualTime reports whether the time instant t is equal to second-level precision u.
+func (t *MicroTime) EqualTime(u *Time) bool {
+	if t == nil && u == nil {
+		return true
+	}
+	if t != nil && u != nil {
+		return t.Time.Equal(u.Time)
+	}
+	return false
+}
+
+// UnixMicro returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func UnixMicro(sec int64, nsec int64) MicroTime {
+	return MicroTime{time.Unix(sec, nsec)}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *MicroTime) UnmarshalJSON(b []byte) error {
+	if len(b) == 4 && string(b) == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pt, err := time.Parse(RFC3339Micro, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *MicroTime) UnmarshalQueryParameter(str string) error {
+	if len(str) == 0 {
+		t.Time = time.Time{}
+		return nil
+	}
+	// Tolerate requests from older clients that used JSON serialization to build query params
+	if len(str) == 4 && str == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	pt, err := time.Parse(RFC3339Micro, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t MicroTime) MarshalJSON() ([]byte, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as JSON's "null".
+		return []byte("null"), nil
+	}
+
+	return json.Marshal(t.UTC().Format(RFC3339Micro))
+}
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t MicroTime) MarshalQueryParameter() (string, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as an empty string
+		return "", nil
+	}
+
+	return t.UTC().Format(RFC3339Micro), nil
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
new file mode 100644
index 00000000000..bdc55ab348f
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -0,0 +1,172 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON. Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Time struct {
+	time.Time
+}
+
+// DeepCopyInto creates a deep-copy of the Time value.
The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// NewTime returns a wrapped instance of the provided time +func NewTime(time time.Time) Time { + return Time{time} +} + +// Date returns the Time corresponding to the supplied parameters +// by wrapping time.Date. +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// Now returns the current local time. +func Now() Time { + return Time{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t *Time) Before(u *Time) bool { + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false +} + +// Equal reports whether the time instant t is equal to u. +func (t *Time) Equal(u *Time) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false +} + +// Unix returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func Unix(sec int64, nsec int64) Time { + return Time{time.Unix(sec, nsec)} +} + +// Rfc3339Copy returns a copy of the Time at second-level precision. +func (t Time) Rfc3339Copy() Time { + copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) + return Time{copied} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *Time) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + buf := make([]byte, 0, len(time.RFC3339)+2) + buf = append(buf, '"') + // time cannot contain non escapable JSON characters + buf = t.UTC().AppendFormat(buf, time.RFC3339) + buf = append(buf, '"') + return buf, nil +} + +// ToUnstructured implements the value.UnstructuredConverter interface. 
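+// A zero Time converts to nil, mirroring the explicit JSON null emitted by
+// MarshalJSON above; any other value converts to its RFC3339 string form.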
+func (t Time) ToUnstructured() interface{} { + if t.IsZero() { + return nil + } + buf := make([]byte, 0, len(time.RFC3339)) + buf = t.UTC().AppendFormat(buf, time.RFC3339) + return string(buf) +} + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go new file mode 100644 index 00000000000..39073c06bdc --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -0,0 +1,1332 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API types that are common to all versions. +// +// The package contains two categories of types: +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// In the future, we will probably move these categories of objects into +// separate packages. +package v1 + +import ( + "fmt" + "strings" + + "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types" +) + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // selfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. 
+ // +optional + SelfLink string `json:"selfLink,omitempty"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty"` + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // consistent list may not be possible if the server configuration has changed or more than a few + // minutes have passed. The resourceVersion field returned when using this continue value will be + // identical to the value in the first response, unless you have received this token from an error + // message. + Continue string `json:"continue,omitempty"` + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` +} + +// Field path constants that are specific to the internal API +// representation. +const ( + ObjectNameField = "metadata.name" +) + +// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here +const ( + FinalizerOrphanDependents = "orphan" + FinalizerDeleteDependents = "foregroundDeletion" +) + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. 
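+	// (Illustration with hypothetical values: a create request carrying
+	// GenerateName "demo-" may come back with a server-chosen Name such as
+	// "demo-x7f2k"; the exact suffix is a server implementation detail.)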
+	//
+	// If this field is specified and the generated name exists, the server will
+	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+	// should retry (optionally after the time indicated in the Retry-After header).
+	//
+	// Applied only if Name is not specified.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
+	// +optional
+	GenerateName string `json:"generateName,omitempty"`
+
+	// Namespace defines the space within which each name must be unique. An empty namespace is
+	// equivalent to the "default" namespace, but "default" is the canonical representation.
+	// Not all objects are required to be scoped to a namespace - the value of this field for
+	// those objects will be empty.
+	//
+	// Must be a DNS_LABEL.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/namespaces
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+
+	// SelfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	//
+	// DEPRECATED
+	// Kubernetes will stop propagating this field in 1.20 release and the field is planned
+	// to be removed in 1.21 release.
+	// +optional
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// UID is the unique in time and space value for this object. It is typically generated by
+	// the server on successful creation of a resource and is not allowed to change on PUT
+	// operations.
+	//
+	// Populated by the system.
+	// Read-only.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty"`
+
+	// An opaque value that represents the internal version of this object that can
+	// be used by clients to determine when objects have changed. May be used for optimistic
+	// concurrency, change detection, and the watch operation on a resource or set of resources.
+	// Clients must treat these values as opaque and passed unmodified back to the server.
+	// They may only be valid for a particular resource or set of resources.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Value must be treated as opaque by clients.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty"`
+
+	// A sequence number representing a specific generation of the desired state.
+	// Populated by the system. Read-only.
+	// +optional
+	Generation int64 `json:"generation,omitempty"`
+
+	// CreationTimestamp is a timestamp representing the server time when this object was
+	// created. It is not guaranteed to be set in happens-before order across separate operations.
+	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Null for lists.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	CreationTimestamp Time `json:"creationTimestamp,omitempty"`
+
+	// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+	// field is set by the server when a graceful deletion is requested by the user, and is not
+	// directly settable by a client.
The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + DeletionTimestamp *Time `json:"deletionTimestamp,omitempty"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. 
+ // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. + // +optional + // +patchStrategy=merge + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty"` + + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. + // + // +optional + ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic = "kube-public" +) + +// OwnerReference contains enough information to let you identify an owning +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. +// +structType=atomic +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion"` + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + Kind string `json:"kind"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + UID types.UID `json:"uid"` + // If true, this reference points to the managing controller. + // +optional + Controller *bool `json:"controller,omitempty"` + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. 
+ // +optional + BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty"` + + // +k8s:deprecated=includeUninitialized,protobuf=6 + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty"` + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // +optional + AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty"` + + // resourceVersion sets a constraint on what resource versions a request may be served from. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + // + // Defaults to unset + // +optional + ResourceVersion string `json:"resourceVersion,omitempty"` + + // resourceVersionMatch determines how resourceVersion is applied to list calls. + // It is highly recommended that resourceVersionMatch be set for list calls where + // resourceVersion is set + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + // + // Defaults to unset + // +optional + ResourceVersionMatch ResourceVersionMatch `json:"resourceVersionMatch,omitempty"` + // Timeout for the list/watch call. + // This limits the duration of the call, regardless of any activity or inactivity. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. 
This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + Limit int64 `json:"limit,omitempty"` + // The continue option should be set when retrieving more results from the server. Since this value is + // server defined, clients may only use the continue value from a previous query result with identical + // query parameters (except for the value of continue) and the server may reject a continue value it + // does not recognize. If the specified continue value is no longer valid whether due to expiration + // (generally five to fifteen minutes) or a configuration change on the server, the server will + // respond with a 410 ResourceExpired error together with a continue token. If the client needs a + // consistent list, it must restart their list without the continue field. Otherwise, the client may + // send another list request with the token received with the 410 error, the server will respond with + // a list starting from the next key, but from the latest snapshot, which is inconsistent from the + // previous list results - objects that are created, modified, or deleted after the first list request + // will be included in the response, as long as their keys are after the "next key". + // + // This field is not supported when watch is true. Clients may start a watch from the last + // resourceVersion value returned by the server and not miss any modifications. + Continue string `json:"continue,omitempty"` +} + +// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch +// may only be set if resourceVersion is also set. +// +// "NotOlderThan" matches data at least as new as the provided resourceVersion. +// "Exact" matches data at the exact resourceVersion provided. +// +// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for +// details. +type ResourceVersionMatch string + +const ( + // ResourceVersionMatchNotOlderThan matches data at least as new as the provided + // resourceVersion. + ResourceVersionMatchNotOlderThan ResourceVersionMatch = "NotOlderThan" + // ResourceVersionMatchExact matches data at the exact resourceVersion + // provided. + ResourceVersionMatchExact ResourceVersionMatch = "Exact" +) + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GetOptions is the standard query options to the standard REST get call. +type GetOptions struct { + TypeMeta `json:",inline"` + // resourceVersion sets a constraint on what resource versions a request may be served from. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + // + // Defaults to unset + // +optional + ResourceVersion string `json:"resourceVersion,omitempty"` + // +k8s:deprecated=includeUninitialized,protobuf=2 +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of +// the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. 
+// DeletionPropagation decides if a deletion will propagate to the dependents of
+// the object, and how the garbage collector will handle the propagation.
+type DeletionPropagation string
+
+const (
+	// Orphans the dependents.
+	DeletePropagationOrphan DeletionPropagation = "Orphan"
+	// Deletes the object from the key-value store, the garbage collector will
+	// delete the dependents in the background.
+	DeletePropagationBackground DeletionPropagation = "Background"
+	// The object exists in the key-value store until the garbage collector
+	// deletes all the dependents whose ownerReference.blockOwnerDeletion=true
+	// from the key-value store. The API server will put the "foregroundDeletion"
+	// finalizer on the object, and set its deletionTimestamp. This policy is
+	// cascading, i.e., the dependents will be deleted with Foreground.
+	DeletePropagationForeground DeletionPropagation = "Foreground"
+)
+
+const (
+	// DryRunAll means to complete all processing stages, but don't
+	// persist changes to storage.
+	DryRunAll = "All"
+)
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeleteOptions may be provided when deleting an API object.
+type DeleteOptions struct {
+	TypeMeta `json:",inline"`
+
+	// The duration in seconds before the object should be deleted. The value must be a
+	// non-negative integer. The value zero indicates delete immediately. If this value is
+	// nil, the default grace period for the specified type will be used.
+	// Defaults to a per-object value if not specified. Zero means delete immediately.
+	// +optional
+	GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
+
+	// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+	// returned.
+	// +k8s:conversion-gen=false
+	// +optional
+	Preconditions *Preconditions `json:"preconditions,omitempty"`
+
+	// Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7.
+	// Should the dependent objects be orphaned. If true/false, the "orphan"
+	// finalizer will be added to/removed from the object's finalizers list.
+	// Either this field or PropagationPolicy may be set, but not both.
+	// +optional
+	OrphanDependents *bool `json:"orphanDependents,omitempty"`
+
+	// Whether and how garbage collection will be performed.
+	// Either this field or OrphanDependents may be set, but not both.
+	// The default policy is decided by the existing finalizer set in the
+	// metadata.finalizers and the resource-specific default policy.
+	// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+	// allow the garbage collector to delete the dependents in the background;
+	// 'Foreground' - a cascading policy that deletes all dependents in the
+	// foreground.
+	// +optional
+	PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty"`
+}
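To make the propagation policies concrete, here is a minimal sketch of a foreground cascading delete, continuing the previous sketch's package and imports (the clientset and resource names are hypothetical); the API server keeps the object visible until its blocking dependents are gone:

// deleteWithForeground deletes a Deployment and lets the server garbage-collect
// dependents with blockOwnerDeletion=true before the object itself disappears.
func deleteWithForeground(ctx context.Context, cs kubernetes.Interface) error {
	policy := metav1.DeletePropagationForeground
	return cs.AppsV1().Deployments("default").Delete(ctx, "my-app", metav1.DeleteOptions{
		PropagationPolicy: &policy,
	})
}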
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CreateOptions may be provided when creating an API object.
+type CreateOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty"`
+	// +k8s:deprecated=includeUninitialized,protobuf=2
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// equal to 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint.
+	// +optional
+	FieldManager string `json:"fieldManager,omitempty"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PatchOptions may be provided when patching an API object.
+// PatchOptions is meant to be a superset of UpdateOptions.
+type PatchOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty"`
+
+	// Force is going to "force" Apply requests. It means the user will
+	// re-acquire conflicting fields owned by other people. The force
+	// flag must be unset for non-apply patch requests.
+	// +optional
+	Force *bool `json:"force,omitempty"`
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// equal to 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint. This
+	// field is required for apply requests
+	// (application/apply-patch) but optional for non-apply patch
+	// types (JsonPatch, MergePatch, StrategicMergePatch).
+	// +optional
+	FieldManager string `json:"fieldManager,omitempty"`
+}
+
+// ApplyOptions may be provided when applying an API object.
+// FieldManager is required for apply requests.
+// ApplyOptions is equivalent to PatchOptions. It is provided as a convenience with documentation
+// that speaks specifically to how the options fields relate to apply.
+type ApplyOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty"`
+
+	// Force is going to "force" Apply requests. It means the user will
+	// re-acquire conflicting fields owned by other people.
+	Force bool `json:"force"`
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// equal to 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint. This
+	// field is required.
+	FieldManager string `json:"fieldManager"`
+}
+
+func (o ApplyOptions) ToPatchOptions() PatchOptions {
+	return PatchOptions{DryRun: o.DryRun, Force: &o.Force, FieldManager: o.FieldManager}
+}
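Since ApplyOptions deliberately mirrors PatchOptions (as ToPatchOptions shows), a server-side apply is just a PATCH with the apply patch type. A minimal sketch, continuing the earlier package and additionally assuming the upstream "k8s.io/apimachinery/pkg/types" import; the manager name is hypothetical:

// applyConfigMap server-side-applies a ConfigMap, claiming its fields for
// "demo-manager" and forcing re-acquisition of conflicting fields.
func applyConfigMap(ctx context.Context, cs kubernetes.Interface) error {
	manifest := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"},"data":{"k":"v"}}`)
	force := true
	_, err := cs.CoreV1().ConfigMaps("default").Patch(ctx, "demo",
		types.ApplyPatchType, manifest,
		metav1.PatchOptions{FieldManager: "demo-manager", Force: &force})
	return err
}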
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UpdateOptions may be provided when updating an API object.
+// All fields in UpdateOptions should also be present in PatchOptions.
+type UpdateOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty"`
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// equal to 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint.
+	// +optional
+	FieldManager string `json:"fieldManager,omitempty"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty"`
+	// Specifies the target ResourceVersion.
+	// +optional
+	ResourceVersion *string `json:"resourceVersion,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Status is a return value for calls that don't return other objects.
+type Status struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty"`
+
+	// Status of the operation.
+	// One of: "Success" or "Failure".
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status string `json:"status,omitempty"`
+	// A human-readable description of the status of this operation.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// A machine-readable description of why this operation is in the
+	// "Failure" status. If this value is empty there
+	// is no information available. A Reason clarifies an HTTP status
+	// code but does not override it.
+	// +optional
+	Reason StatusReason `json:"reason,omitempty"`
+	// Extended data associated with the reason. Each reason may define its
+	// own extended details. This field is optional and the data returned
+	// is not guaranteed to conform to any schema except that defined by
+	// the reason type.
+	// +optional
+	Details *StatusDetails `json:"details,omitempty"`
+	// Suggested HTTP return code for this status, 0 if not set.
+	// +optional
+	Code int32 `json:"code,omitempty"`
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or under-defined.
+type StatusDetails struct {
+	// The name attribute of the resource associated with the status StatusReason
+	// (when there is a single name which can be described).
+	// +optional
+	Name string `json:"name,omitempty"`
+	// The group attribute of the resource associated with the status StatusReason.
+	// +optional
+	Group string `json:"group,omitempty"`
+	// The kind attribute of the resource associated with the status StatusReason.
+	// On some operations may differ from the requested resource Kind.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty"` + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty"` + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + Causes []StatusCause `json:"causes,omitempty"` + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. + // +optional + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty"` +} + +// Values of Status.Status +const ( + StatusSuccess = "Success" + StatusFailure = "Failure" +) + +// StatusReason is an enumeration of possible failure causes. Each StatusReason +// must map to a single HTTP status code, but multiple reasons may map +// to the same HTTP status code. +// TODO: move to apiserver +type StatusReason string + +const ( + // StatusReasonUnknown means the server has declined to indicate a specific reason. + // The details field may contain other information about this error. + // Status code 500. + StatusReasonUnknown StatusReason = "" + + // StatusReasonUnauthorized means the server can be reached and understood the request, but requires + // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) + // in order for the action to be completed. If the user has specified credentials on the request, the + // server considers them insufficient. + // Status code 401 + StatusReasonUnauthorized StatusReason = "Unauthorized" + + // StatusReasonForbidden means the server can be reached and understood the request, but refuses + // to take any further action. It is the result of the server being configured to deny access for some reason + // to the requested resource by the client. + // Details (optional): + // "kind" string - the kind attribute of the forbidden resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the forbidden resource + // Status code 403 + StatusReasonForbidden StatusReason = "Forbidden" + + // StatusReasonNotFound means one or more resources required for this operation + // could not be found. + // Details (optional): + // "kind" string - the kind attribute of the missing resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the missing resource + // Status code 404 + StatusReasonNotFound StatusReason = "NotFound" + + // StatusReasonAlreadyExists means the resource you are creating already exists. + // Details (optional): + // "kind" string - the kind attribute of the conflicting resource + // "id" string - the identifier of the conflicting resource + // Status code 409 + StatusReasonAlreadyExists StatusReason = "AlreadyExists" + + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. 
+	// Status code 409
+	StatusReasonConflict StatusReason = "Conflict"
+
+	// StatusReasonGone means the item is no longer available at the server and no
+	// forwarding address is known.
+	// Status code 410
+	StatusReasonGone StatusReason = "Gone"
+
+	// StatusReasonInvalid means the requested create or update operation cannot be
+	// completed due to invalid data provided as part of the request. The client may
+	// need to alter the request. When set, the client may use the StatusDetails
+	// message field as a summary of the issues encountered.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the invalid resource
+	//   "id"   string - the identifier of the invalid resource
+	//   "causes"      - one or more StatusCause entries indicating the data in the
+	//                   provided resource that was invalid. The code, message, and
+	//                   field attributes will be set.
+	// Status code 422
+	StatusReasonInvalid StatusReason = "Invalid"
+
+	// StatusReasonServerTimeout means the server can be reached and understood the request,
+	// but cannot complete the action in a reasonable time. The client should retry the request.
+	// This may be due to temporary server load or a transient communication issue with
+	// another server. Status code 500 is used because the HTTP spec provides no suitable
+	// server-requested client retry and the 5xx class represents actionable errors.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the resource being acted on.
+	//   "id"   string - the operation that is being attempted.
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 500
+	StatusReasonServerTimeout StatusReason = "ServerTimeout"
+
+	// StatusReasonTimeout means that the request could not be completed within the given time.
+	// Clients can get this response only when they specified a timeout param in the request,
+	// or if the server cannot complete the operation within a reasonable amount of time.
+	// The request might succeed with an increased value of the timeout param. The client *should*
+	// wait at least the number of seconds specified by the retryAfterSeconds field.
+	// Details (optional):
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 504
+	StatusReasonTimeout StatusReason = "Timeout"
+
+	// StatusReasonTooManyRequests means the server experienced too many requests within a
+	// given window and that the client must wait to perform the action again. A client may
+	// always retry the request that led to this error, although the client should wait at least
+	// the number of seconds specified by the retryAfterSeconds field.
+	// Details (optional):
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 429
+	StatusReasonTooManyRequests StatusReason = "TooManyRequests"
+
+	// StatusReasonBadRequest means that the request itself was invalid, because the request
+	// doesn't make any sense, for example deleting a read-only object. This is different from
+	// StatusReasonInvalid above, which indicates that the API call could possibly succeed, but the
+	// data was invalid. API calls that return BadRequest can never succeed.
+	// Status code 400
+	StatusReasonBadRequest StatusReason = "BadRequest"
+
+	// StatusReasonMethodNotAllowed means that the action the client attempted to perform on the
+	// resource was not supported by the code - for instance, attempting to delete a resource that
+	// can only be created. API calls that return MethodNotAllowed can never succeed.
+	// Status code 405
+	StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed"
+
+	// StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable
+	// to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml.
+	// API calls that return NotAcceptable can never succeed.
+	// Status code 406
+	StatusReasonNotAcceptable StatusReason = "NotAcceptable"
+
+	// StatusReasonRequestEntityTooLarge means that the request entity is too large.
+	// Status code 413
+	StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge"
+
+	// StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable
+	// to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml.
+	// API calls that return UnsupportedMediaType can never succeed.
+	// Status code 415
+	StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType"
+
+	// StatusReasonInternalError indicates that an internal error occurred; it is unexpected
+	// and the outcome of the call is unknown.
+	// Details (optional):
+	//   "causes" - The original error
+	// Status code 500
+	StatusReasonInternalError StatusReason = "InternalError"
+
+	// StatusReasonExpired indicates that the request is invalid because the content you are requesting
+	// has expired and is no longer available. It is typically associated with watches that can't be
+	// serviced.
+	// Status code 410 (gone)
+	StatusReasonExpired StatusReason = "Expired"
+
+	// StatusReasonServiceUnavailable means that the request itself was valid,
+	// but the requested service is unavailable at this time.
+	// Retrying the request after some time might succeed.
+	// Status code 503
+	StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable"
+)
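Clients rarely branch on these reason strings by hand; the usual path is the predicate helpers in the upstream k8s.io/apimachinery/pkg/api/errors package, which decode the Status object defined above. A minimal sketch, assuming that upstream package imported as apierrors:

// classifyError shows how the StatusReason taxonomy surfaces to clients.
func classifyError(err error) string {
	switch {
	case apierrors.IsNotFound(err): // StatusReasonNotFound, 404
		return "not found"
	case apierrors.IsConflict(err): // StatusReasonConflict, 409
		return "conflict"
	case apierrors.IsTooManyRequests(err): // StatusReasonTooManyRequests, 429
		return "throttled"
	default:
		return "other"
	}
}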
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+type StatusCause struct {
+	// A machine-readable description of the cause of the error. If this value is
+	// empty there is no information available.
+	// +optional
+	Type CauseType `json:"reason,omitempty"`
+	// A human-readable description of the cause of the error. This field may be
+	// presented as-is to a reader.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// The field of the resource that has caused this error, as named by its JSON
+	// serialization. May include dot and postfix notation for nested attributes.
+	// Arrays are zero-indexed. Fields may appear more than once in an array of
+	// causes due to fields having multiple errors.
+	// Optional.
+	//
+	// Examples:
+	//   "name" - the field "name" on the current resource
+	//   "items[0].name" - the field "name" on the first array entry in "items"
+	// +optional
+	Field string `json:"field,omitempty"`
+}
+
+// CauseType is a machine-readable value providing more detail about what
+// occurred in a status response. An operation may have multiple causes for a
+// status (whether Failure or Success).
+type CauseType string
+
+const (
+	// CauseTypeFieldValueNotFound is used to report failure to find a requested value
+	// (e.g. looking up an ID).
+	CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound"
+	// CauseTypeFieldValueRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).
+	CauseTypeFieldValueRequired CauseType = "FieldValueRequired"
+	// CauseTypeFieldValueDuplicate is used to report collisions of values that must be
+	// unique (e.g. unique IDs).
+	CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate"
+	// CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex
+	// match).
+	CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid"
+	// CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
+	// values that cannot be handled (e.g. an enumerated string).
+	CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
+	// CauseTypeUnexpectedServerResponse is used to report when the server responded to the client
+	// without the expected return type. The presence of this cause indicates the error may be
+	// due to an intervening proxy or the server software malfunctioning.
+	CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse"
+	// CauseTypeFieldManagerConflict is used to report when another client claims to manage this field.
+	// It should only be returned for a request using server-side apply.
+	CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
+	// CauseTypeResourceVersionTooLarge is used to report that the requested resource version
+	// is newer than the data observed by the API server, so the request cannot be served.
+	CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
+)
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type APIVersions struct {
+	TypeMeta `json:",inline"`
+	// versions are the api versions that are available.
+	Versions []string `json:"versions"`
+	// a map of client CIDR to server address that is serving this group.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	// The server returns only those CIDRs that it thinks that the client can match.
+	// For example: the master will return an internal IP CIDR only if the client reaches the server using an internal IP.
+	// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+type APIGroupList struct {
+	TypeMeta `json:",inline"`
+	// groups is a list of APIGroup.
+	Groups []APIGroup `json:"groups"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+type APIGroup struct {
+	TypeMeta `json:",inline"`
+	// name is the name of the group.
+	Name string `json:"name"`
+	// versions are the versions supported in this group.
+	Versions []GroupVersionForDiscovery `json:"versions"`
+	// preferredVersion is the version preferred by the API server, which
+	// probably is the storage version.
+	// +optional
+	PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty"`
+	// a map of client CIDR to server address that is serving this group.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	// The server returns only those CIDRs that it thinks that the client can match.
+	// For example: the master will return an internal IP CIDR only if the client reaches the server using an internal IP.
+	// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+	// +optional
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty"`
+}
+
+// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
+type ServerAddressByClientCIDR struct {
+	// The CIDR with which clients can match their IP to figure out the server address that they should use.
+	ClientCIDR string `json:"clientCIDR"`
+	// Address of this server, suitable for a client that matches the above CIDR.
+	// This can be a hostname, hostname:port, IP or IP:port.
+	ServerAddress string `json:"serverAddress"`
+}
+
+// GroupVersionForDiscovery contains the "group/version" and "version" string of a version.
+// It is made a struct to keep extensibility.
+type GroupVersionForDiscovery struct {
+	// groupVersion specifies the API group and version in the form "group/version".
+	GroupVersion string `json:"groupVersion"`
+	// version specifies the version in the form of "version". This is to save
+	// the clients the trouble of splitting the GroupVersion.
+	Version string `json:"version"`
+}
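These discovery types are what a client walks to learn what a server actually serves. A minimal sketch, continuing the earlier package; the discovery client on the assumed clientset returns the APIGroupList and APIResourceList shapes defined here:

// printServerResources visits each group's preferred version and lists the
// resources it serves, with their scope and supported verbs.
func printServerResources(cs kubernetes.Interface) error {
	groups, err := cs.Discovery().ServerGroups()
	if err != nil {
		return err
	}
	for _, g := range groups.Groups {
		gv := g.PreferredVersion.GroupVersion
		list, err := cs.Discovery().ServerResourcesForGroupVersion(gv)
		if err != nil {
			return err
		}
		for _, r := range list.APIResources {
			fmt.Printf("%s %s namespaced=%v verbs=%v\n", gv, r.Name, r.Namespaced, r.Verbs)
		}
	}
	return nil
}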
+// APIResource specifies the name of a resource and whether it is namespaced.
+type APIResource struct {
+	// name is the plural name of the resource.
+	Name string `json:"name"`
+	// singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely.
+	// The singularName is more correct for reporting status on a single item, and both singular and plural are allowed
+	// from the kubectl CLI interface.
+	SingularName string `json:"singularName"`
+	// namespaced indicates if a resource is namespaced or not.
+	Namespaced bool `json:"namespaced"`
+	// group is the preferred group of the resource. Empty implies the group of the containing resource list.
+	// For subresources, this may have a different value, for example: Scale.
+	Group string `json:"group,omitempty"`
+	// version is the preferred version of the resource. Empty implies the version of the containing resource list.
+	// For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group).
+	Version string `json:"version,omitempty"`
+	// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo').
+	Kind string `json:"kind"`
+	// verbs is a list of supported kube verbs (this includes get, list, watch, create,
+	// update, patch, delete, deletecollection, and proxy).
+	Verbs Verbs `json:"verbs"`
+	// shortNames is a list of suggested short names of the resource.
+	ShortNames []string `json:"shortNames,omitempty"`
+	// categories is a list of the grouped resources this resource belongs to (e.g. 'all').
+	Categories []string `json:"categories,omitempty"`
+	// The hash value of the storage version, the version this resource is
+	// converted to when written to the data store. Value must be treated
+	// as opaque by clients. Only equality comparison on the value is valid.
+	// This is an alpha feature and may change or be removed in the future.
+	// The field is populated by the apiserver only if the
+	// StorageVersionHash feature gate is enabled.
+	// This field will remain optional even if it graduates.
+	// +optional
+	StorageVersionHash string `json:"storageVersionHash,omitempty"`
+}
+
+// Verbs masks the value so protobuf can generate
+//
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Verbs []string
+
+func (vs Verbs) String() string {
+	return fmt.Sprintf("%v", []string(vs))
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIResourceList is a list of APIResource; it is used to expose the name of the
+// resources supported in a specific group and version, and whether the resource
+// is namespaced.
+type APIResourceList struct {
+	TypeMeta `json:",inline"`
+	// groupVersion is the group and version this APIResourceList is for.
+	GroupVersion string `json:"groupVersion"`
+	// resources contains the name of the resources and whether they are namespaced.
+	APIResources []APIResource `json:"resources"`
+}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+type RootPaths struct {
+	// paths are the paths available at root.
+	Paths []string `json:"paths"`
+}
+
+// TODO: remove me when watch is refactored
+func LabelSelectorQueryParam(version string) string {
+	return "labelSelector"
+}
+
+// TODO: remove me when watch is refactored
+func FieldSelectorQueryParam(version string) string {
+	return "fieldSelector"
+}
+
+// String returns available api versions as a human-friendly version string.
+func (apiVersions APIVersions) String() string {
+	return strings.Join(apiVersions.Versions, ",")
+}
+
+func (apiVersions APIVersions) GoString() string {
+	return apiVersions.String()
+}
+
+// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+type Patch struct{}
+
+// Note:
+// There are two different styles of label selectors used in versioned types:
+// an older style which is represented as just a string in versioned types, and a
+// newer style that is structured. LabelSelector is an internal representation for the
+// latter style.
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+// +structType=atomic
+type LabelSelector struct {
+	// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+	// map is equivalent to an element of matchExpressions, whose key field is "key", the
+	// operator is "In", and the values array contains only "value". The requirements are ANDed.
+	// +optional
+	MatchLabels map[string]string `json:"matchLabels,omitempty"`
+	// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+	// +optional
+	MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+	// key is the label key that the selector applies to.
+	// +patchMergeKey=key
+	// +patchStrategy=merge
+	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
+	// operator represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists and DoesNotExist.
+	Operator LabelSelectorOperator `json:"operator"`
+	// values is an array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. This array is replaced during a strategic
+	// merge patch.
+	// +optional
+	Values []string `json:"values,omitempty"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+	LabelSelectorOpIn           LabelSelectorOperator = "In"
+	LabelSelectorOpNotIn        LabelSelectorOperator = "NotIn"
+	LabelSelectorOpExists       LabelSelectorOperator = "Exists"
+	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
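A structured selector is usually flattened back into the string form that ListOptions.LabelSelector expects. A minimal sketch using the upstream metav1 helper for that conversion (LabelSelectorAsSelector; the vendored podman copy exists only for type compatibility):

// selectorString builds "app=demo,tier in (web,api)" from a structured selector.
func selectorString() (string, error) {
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "demo"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"web", "api"}},
		},
	}
	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		return "", err
	}
	return s.String(), nil
}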
+// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
+// that the fieldset applies to.
+type ManagedFieldsEntry struct {
+	// Manager is an identifier of the workflow managing these fields.
+	Manager string `json:"manager,omitempty"`
+	// Operation is the type of operation which led to this ManagedFieldsEntry being created.
+	// The only valid values for this field are 'Apply' and 'Update'.
+	Operation ManagedFieldsOperationType `json:"operation,omitempty"`
+	// APIVersion defines the version of this resource that this field set
+	// applies to. The format is "group/version" just like the top-level
+	// APIVersion field. It is necessary to track the version of a field
+	// set because it cannot be automatically converted.
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Time is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'.
+	// +optional
+	Time *Time `json:"time,omitempty"`
+
+	// Fields is tombstoned to show why 5 is a reserved protobuf tag.
+	// Fields *Fields `json:"fields,omitempty"`
+
+	// FieldsType is the discriminator for the different fields format and version.
+	// There is currently only one possible value: "FieldsV1"
+	FieldsType string `json:"fieldsType,omitempty"`
+	// FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
+	// +optional
+	FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty"`
+
+	// Subresource is the name of the subresource used to update that object, or
+	// empty string if the object was updated through the main resource. The
+	// value of this field is used to distinguish between managers, even if they
+	// share the same name. For example, a status update will be distinct from a
+	// regular update using the same manager name.
+	// Note that the APIVersion field is not related to the Subresource field and
+	// it always corresponds to the version of the main resource.
+	Subresource string `json:"subresource,omitempty"`
+}
+
+// ManagedFieldsOperationType is the type of operation which led to a ManagedFieldsEntry being created.
+type ManagedFieldsOperationType string
+
+const (
+	ManagedFieldsOperationApply  ManagedFieldsOperationType = "Apply"
+	ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update"
+)
+
+// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
+//
+// Each key is either a '.' representing the field itself, and will always map to an empty set,
+// or a string representing a sub-field or item. The string will follow one of these four formats:
+// 'f:<name>', where <name> is the name of a field in a struct, or key in a map
+// 'v:<value>', where <value> is the exact json formatted value of a list item
+// 'i:<index>', where <index> is the position of an item in a list
+// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
+// If a key maps to an empty Fields value, the field that key represents is part of the set.
+//
+// The exact format is defined in sigs.k8s.io/structured-merge-diff
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type FieldsV1 struct {
+	// Raw is the underlying serialization of this object.
+	Raw []byte `json:"-"`
+}
+
+func (f FieldsV1) String() string {
+	return string(f.Raw)
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// IncludeObjectPolicy controls which portion of the object is returned with a Table.
+type IncludeObjectPolicy string
+
+const (
+	// IncludeNone returns no object.
+	IncludeNone IncludeObjectPolicy = "None"
+	// IncludeMetadata serializes the object containing only its metadata field.
+	IncludeMetadata IncludeObjectPolicy = "Metadata"
+	// IncludeObject contains the full object.
+	IncludeObject IncludeObjectPolicy = "Object"
+)
+
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type TableOptions struct {
+	TypeMeta `json:",inline"`
+
+	// NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions
+	// and may be removed as a field in a future release.
+	NoHeaders bool `json:"-"`
+
+	// includeObject decides whether to include each object along with its columnar information.
+	// Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+	// specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+	// in version v1beta1 of the meta.k8s.io API group.
+	IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty"`
+}
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadata struct {
+	TypeMeta `json:",inline"`
+	// Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty"` + + // items contains each of the included items. + Items []PartialObjectMetadata `json:"items"` +} + +// Condition contains details for one aspect of the current state of this API Resource. +// --- +// This struct is intended for direct use as an array at the field path .status.conditions. For example, +// type FooStatus struct{ +// // Represents the observations of a foo's current state. +// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" +// // +patchMergeKey=type +// // +patchStrategy=merge +// // +listType=map +// // +listMapKey=type +// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +// +// // other fields +// } +type Condition struct { + // type of condition in CamelCase or in foo.example.com/CamelCase. + // --- + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + // useful (see .node.status.conditions), the ability to deconflict is important. + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Type string `json:"type"` + // status of the condition, one of True, False, Unknown. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=True;False;Unknown + Status ConditionStatus `json:"status"` + // observedGeneration represents the .metadata.generation that the condition was set based upon. + // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + // with respect to the current state of the instance. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // lastTransitionTime is the last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastTransitionTime Time `json:"lastTransitionTime"` + // reason contains a programmatic identifier indicating the reason for the condition's last transition. + // Producers of specific condition types may define expected values and meanings for this field, + // and whether the values are considered a guaranteed API. + // The value should be a CamelCase string. + // This field may not be empty. 
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength=1024
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
+	Reason string `json:"reason"`
+	// message is a human-readable message indicating details about the transition.
+	// This may be an empty string.
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength=32768
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types/uid.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types/uid.go
new file mode 100644
index 00000000000..869339222e9
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types/uid.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// UID is a type that holds unique ID values, including UUIDs. Because we
+// don't ONLY use UUIDs, this is an alias to string. Being a type captures
+// intent and helps make sure that UIDs and names do not get conflated.
+type UID string
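The Condition shape defined above is what the upstream helpers in k8s.io/apimachinery/pkg/api/meta operate on. A minimal sketch (assuming that upstream package imported as apimeta, continuing the earlier examples) of maintaining a .status.conditions slice:

// setAvailable records an Available condition; the helper fills in
// lastTransitionTime and de-duplicates entries by Type.
func setAvailable(conds *[]metav1.Condition, generation int64) {
	apimeta.SetStatusCondition(conds, metav1.Condition{
		Type:               "Available",
		Status:             metav1.ConditionTrue,
		ObservedGeneration: generation,
		Reason:             "MinimumReplicasAvailable",
		Message:            "deployment has minimum availability",
	})
}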
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
new file mode 100644
index 00000000000..a502b5adb69
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go
@@ -0,0 +1,43 @@
+//go:build !notest
+// +build !notest
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package intstr
+
+import (
+	fuzz "github.com/google/gofuzz"
+)
+
+// Fuzz satisfies fuzz.Interface.
+func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+	if intstr == nil {
+		return
+	}
+	if c.RandBool() {
+		intstr.Type = Int
+		c.Fuzz(&intstr.IntVal)
+		intstr.StrVal = ""
+	} else {
+		intstr.Type = String
+		intstr.IntVal = 0
+		c.Fuzz(&intstr.StrVal)
+	}
+}
+
+// ensure IntOrString implements fuzz.Interface
+var _ fuzz.Interface = &IntOrString{}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go
new file mode 100644
index 00000000000..29872c22c47
--- /dev/null
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package intstr
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type IntOrString struct {
+	Type   Type
+	IntVal int32
+	StrVal string
+}
+
+// Type represents the stored type of IntOrString.
+type Type int64
+
+const (
+	Int    Type = iota // The IntOrString holds an int.
+	String             // The IntOrString holds a string.
+)
+
+// FromInt creates an IntOrString object with an int32 value. It is
+// your responsibility not to call this method with a value greater
+// than what an int32 can hold.
+// TODO: convert to (val int32)
+func FromInt(val int) IntOrString {
+	return IntOrString{Type: Int, IntVal: int32(val)}
+}
+
+// FromString creates an IntOrString object with a string value.
+func FromString(val string) IntOrString {
+	return IntOrString{Type: String, StrVal: val}
+}
+
+// Parse the given string and try to convert it to an integer before
+// setting it as a string value.
+func Parse(val string) IntOrString {
+	i, err := strconv.Atoi(val)
+	if err != nil {
+		return FromString(val)
+	}
+	return FromInt(i)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
+	if value[0] == '"' {
+		intstr.Type = String
+		return json.Unmarshal(value, &intstr.StrVal)
+	}
+	intstr.Type = Int
+	return json.Unmarshal(value, &intstr.IntVal)
+}
+
+// String returns the string value, or the Itoa of the int value.
+func (intstr *IntOrString) String() string {
+	if intstr == nil {
+		return "<nil>"
+	}
+	if intstr.Type == String {
+		return intstr.StrVal
+	}
+	return strconv.Itoa(intstr.IntValue())
+}
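IntOrString's behavior is easiest to see in a JSON round trip. A minimal self-contained sketch against this package (using the path as vendored here; the upstream k8s.io/apimachinery path behaves identically), relying on String above and IntValue just below:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr"
)

// backend has a port field that accepts 8080 or "http" in JSON.
type backend struct {
	Port intstr.IntOrString `json:"port"`
}

func main() {
	var a, b backend
	_ = json.Unmarshal([]byte(`{"port":8080}`), &a)
	_ = json.Unmarshal([]byte(`{"port":"http"}`), &b)
	fmt.Println(a.Port.IntValue(), b.Port.String()) // 8080 http
}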
+
+// IntValue returns the IntVal if type Int, or if
+// it is a String, will attempt a conversion to int,
+// returning 0 if a parsing error occurs.
+func (intstr *IntOrString) IntValue() int {
+	if intstr.Type == String {
+		i, _ := strconv.Atoi(intstr.StrVal)
+		return i
+	}
+	return int(intstr.IntVal)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (intstr IntOrString) MarshalJSON() ([]byte, error) {
+	switch intstr.Type {
+	case Int:
+		return json.Marshal(intstr.IntVal)
+	case String:
+		return json.Marshal(intstr.StrVal)
+	default:
+		return []byte{}, fmt.Errorf("impossible IntOrString.Type")
+	}
+}
diff --git a/vendor/github.com/containers/podman/v3/pkg/kubeutils/LICENSE b/vendor/github.com/containers/podman/v4/pkg/kubeutils/LICENSE
similarity index 100%
rename from vendor/github.com/containers/podman/v3/pkg/kubeutils/LICENSE
rename to vendor/github.com/containers/podman/v4/pkg/kubeutils/LICENSE
diff --git a/vendor/github.com/containers/podman/v3/pkg/kubeutils/resize.go b/vendor/github.com/containers/podman/v4/pkg/kubeutils/resize.go
similarity index 90%
rename from vendor/github.com/containers/podman/v3/pkg/kubeutils/resize.go
rename to vendor/github.com/containers/podman/v4/pkg/kubeutils/resize.go
index 5d004bc544c..a744c66cc5b 100644
--- a/vendor/github.com/containers/podman/v3/pkg/kubeutils/resize.go
+++ b/vendor/github.com/containers/podman/v4/pkg/kubeutils/resize.go
@@ -17,8 +17,7 @@ limitations under the License.
 package kubeutils
 
 import (
-	"github.com/containers/podman/v3/libpod/define"
-	"k8s.io/apimachinery/pkg/util/runtime"
+	"github.com/containers/podman/v4/libpod/define"
 )
 
 // HandleResizing spawns a goroutine that processes the resize channel, calling resizeFunc for each
@@ -30,8 +29,6 @@ func HandleResizing(resize <-chan define.TerminalSize, resizeFunc func(size defi
 	}
 
 	go func() {
-		defer runtime.HandleCrash()
-
 		for {
 			size, ok := <-resize
 			if !ok {
diff --git a/vendor/github.com/containers/podman/v3/pkg/lookup/lookup.go b/vendor/github.com/containers/podman/v4/pkg/lookup/lookup.go
similarity index 99%
rename from vendor/github.com/containers/podman/v3/pkg/lookup/lookup.go
rename to vendor/github.com/containers/podman/v4/pkg/lookup/lookup.go
index 0b22a19747e..b8ac3046ee8 100644
--- a/vendor/github.com/containers/podman/v3/pkg/lookup/lookup.go
+++ b/vendor/github.com/containers/podman/v4/pkg/lookup/lookup.go
@@ -14,7 +14,7 @@ const (
 	etcgroup = "/etc/group"
 )
 
-// Overrides allows you to override defaults in GetUserGroupInfo
+// Overrides allows you to override defaults in GetUserGroupInfo.
 type Overrides struct {
 	DefaultUser            *user.ExecUser
 	ContainerEtcPasswdPath string
@@ -61,7 +61,7 @@ func GetUserGroupInfo(containerMount, containerUser string, override *Overrides)
 		defaultExecUser = override.DefaultUser
 	} else {
 		// Define a default container user
-		//defaultExecUser = &user.ExecUser{
+		// defaultExecUser = &user.ExecUser{
 		//	Uid:  0,
 		//	Gid:  0,
 		//	Home: "/",
diff --git a/vendor/github.com/containers/podman/v3/pkg/namespaces/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go
similarity index 96%
rename from vendor/github.com/containers/podman/v3/pkg/namespaces/namespaces.go
rename to vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go
index a7736aee056..c95f8e27547 100644
--- a/vendor/github.com/containers/podman/v3/pkg/namespaces/namespaces.go
+++ b/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go
@@ -96,6 +96,11 @@ func (n UsernsMode) IsKeepID() bool {
 	return n == "keep-id"
 }
 
+// IsNoMap indicates whether the container uses a mapping where the (uid, gid) on the host is not present in the namespace.
+func (n UsernsMode) IsNoMap() bool { + return n == "nomap" +} + // IsAuto indicates whether container uses the "auto" userns mode. func (n UsernsMode) IsAuto() bool { parts := strings.Split(string(n), ":") @@ -158,7 +163,7 @@ func (n UsernsMode) IsPrivate() bool { func (n UsernsMode) Valid() bool { parts := strings.Split(string(n), ":") switch mode := parts[0]; mode { - case "", privateType, hostType, "keep-id", nsType, "auto": + case "", privateType, hostType, "keep-id", nsType, "auto", "nomap": case containerType: if len(parts) != 2 || parts[1] == "" { return false @@ -254,7 +259,7 @@ func (n IpcMode) IsHost() bool { return n == hostType } -// IsShareable indicates whether the container's ipc namespace can be shared with another container. +// IsShareable indicates whether the container uses its own shareable ipc namespace which can be shared. func (n IpcMode) IsShareable() bool { return n == shareableType } @@ -370,7 +375,7 @@ func (n NetworkMode) Container() string { return "" } -//UserDefined indicates user-created network +// UserDefined indicates user-created network func (n NetworkMode) UserDefined() string { if n.IsUserDefined() { return string(n) diff --git a/vendor/github.com/containers/podman/v3/pkg/parallel/parallel.go b/vendor/github.com/containers/podman/v4/pkg/parallel/parallel.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/parallel/parallel.go rename to vendor/github.com/containers/podman/v4/pkg/parallel/parallel.go diff --git a/vendor/github.com/containers/podman/v3/pkg/ps/define/types.go b/vendor/github.com/containers/podman/v4/pkg/ps/define/types.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/ps/define/types.go rename to vendor/github.com/containers/podman/v4/pkg/ps/define/types.go diff --git a/vendor/github.com/containers/podman/v3/pkg/resolvconf/dns/resolvconf.go b/vendor/github.com/containers/podman/v4/pkg/resolvconf/dns/resolvconf.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/resolvconf/dns/resolvconf.go rename to vendor/github.com/containers/podman/v4/pkg/resolvconf/dns/resolvconf.go diff --git a/vendor/github.com/containers/podman/v3/pkg/resolvconf/resolvconf.go b/vendor/github.com/containers/podman/v4/pkg/resolvconf/resolvconf.go similarity index 99% rename from vendor/github.com/containers/podman/v3/pkg/resolvconf/resolvconf.go rename to vendor/github.com/containers/podman/v4/pkg/resolvconf/resolvconf.go index cfaedc74abb..f23cd61b078 100644 --- a/vendor/github.com/containers/podman/v3/pkg/resolvconf/resolvconf.go +++ b/vendor/github.com/containers/podman/v4/pkg/resolvconf/resolvconf.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - "github.com/containers/podman/v3/pkg/resolvconf/dns" + "github.com/containers/podman/v4/pkg/resolvconf/dns" "github.com/containers/storage/pkg/ioutils" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go similarity index 95% rename from vendor/github.com/containers/podman/v3/pkg/rootless/rootless.go rename to vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go index 93b4e2e9f61..d7143f54919 100644 --- a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless.go +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go @@ -1,6 +1,8 @@ package rootless import ( + "errors" + "fmt" "os" "sort" "sync" @@ -8,7 +10,6 @@ import ( "github.com/containers/storage/pkg/lockfile" 
"github.com/opencontainers/runc/libcontainer/user" spec "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) // TryJoinPauseProcess attempts to join the namespaces of the pause PID via @@ -16,12 +17,15 @@ import ( // file. func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { if _, err := os.Stat(pausePidPath); err != nil { - return false, -1, nil + if errors.Is(err, os.ErrNotExist) { + return false, -1, nil + } + return false, -1, err } became, ret, err := TryJoinFromFilePaths("", false, []string{pausePidPath}) if err == nil { - return became, ret, err + return became, ret, nil } // It could not join the pause process, let's lock the file before trying to delete it. @@ -31,7 +35,7 @@ func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { if os.IsNotExist(err) { return false, -1, nil } - return false, -1, errors.Wrapf(err, "error acquiring lock on %s", pausePidPath) + return false, -1, fmt.Errorf("acquiring lock on %s: %w", pausePidPath, err) } pidFileLock.Lock() @@ -46,7 +50,7 @@ func TryJoinPauseProcess(pausePidPath string) (bool, int, error) { if err != nil { // It is still failing. We can safely remove it. os.Remove(pausePidPath) - return false, -1, nil + return false, -1, nil // nolint: nilerr } return became, ret, err } diff --git a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless_linux.c b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c similarity index 81% rename from vendor/github.com/containers/podman/v3/pkg/rootless/rootless_linux.c rename to vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c index 6ce4b1e296f..94bd40f8627 100644 --- a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless_linux.c +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c @@ -19,6 +19,42 @@ #include #include +#ifndef TEMP_FAILURE_RETRY +#define TEMP_FAILURE_RETRY(expression) \ + (__extension__ \ + ({ long int __result; \ + do __result = (long int) (expression); \ + while (__result == -1L && errno == EINTR); \ + __result; })) +#endif + +#define cleanup_free __attribute__ ((cleanup (cleanup_freep))) +#define cleanup_close __attribute__ ((cleanup (cleanup_closep))) +#define cleanup_dir __attribute__ ((cleanup (cleanup_dirp))) + +static inline void +cleanup_freep (void *p) +{ + void **pp = (void **) p; + free (*pp); +} + +static inline void +cleanup_closep (void *p) +{ + int *pp = p; + if (*pp >= 0) + TEMP_FAILURE_RETRY (close (*pp)); +} + +static inline void +cleanup_dirp (DIR **p) +{ + DIR *dir = *p; + if (dir) + closedir (dir); +} + int rename_noreplace (int olddirfd, const char *oldpath, int newdirfd, const char *newpath) { int ret; @@ -45,15 +81,6 @@ int rename_noreplace (int olddirfd, const char *oldpath, int newdirfd, const cha return rename (oldpath, newpath); } -#ifndef TEMP_FAILURE_RETRY -#define TEMP_FAILURE_RETRY(expression) \ - (__extension__ \ - ({ long int __result; \ - do __result = (long int) (expression); \ - while (__result == -1L && errno == EINTR); \ - __result; })) -#endif - static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces"; static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone"; @@ -106,6 +133,11 @@ do_pause () for (i = 0; sig[i]; i++) sigaction (sig[i], &act, NULL); + /* Attempt to execv catatonit to keep the pause process alive. 
*/ + execl ("/usr/libexec/podman/catatonit", "catatonit", "-P", NULL); + execl ("/usr/bin/catatonit", "catatonit", "-P", NULL); + /* and if the catatonit executable could not be found, fallback here... */ + prctl (PR_SET_NAME, "podman pause", NULL, NULL, NULL); while (1) pause (); @@ -114,8 +146,8 @@ do_pause () static char ** get_cmd_line_args () { - int fd; - char *buffer; + cleanup_free char *buffer = NULL; + cleanup_close int fd = -1; size_t allocated; size_t used = 0; int ret; @@ -134,10 +166,7 @@ get_cmd_line_args () { ret = TEMP_FAILURE_RETRY (read (fd, buffer + used, allocated - used)); if (ret < 0) - { - free (buffer); - return NULL; - } + return NULL; if (ret == 0) break; @@ -148,30 +177,21 @@ get_cmd_line_args () allocated += 512; char *tmp = realloc (buffer, allocated); if (tmp == NULL) - { - free (buffer); - return NULL; - } + return NULL; buffer = tmp; } } - close (fd); for (i = 0; i < used; i++) if (buffer[i] == '\0') argc++; if (argc == 0) - { - free (buffer); - return NULL; - } + return NULL; argv = malloc (sizeof (char *) * (argc + 1)); if (argv == NULL) - { - free (buffer); - return NULL; - } + return NULL; + argc = 0; argv[argc++] = buffer; @@ -181,15 +201,19 @@ get_cmd_line_args () argv[argc] = NULL; + /* Move ownership. */ + buffer = NULL; + return argv; } static bool can_use_shortcut () { - int argc; - char **argv; + cleanup_free char **argv = NULL; + cleanup_free char *argv0 = NULL; bool ret = true; + int argc; #ifdef DISABLE_JOIN_SHORTCUT return false; @@ -199,12 +223,10 @@ can_use_shortcut () if (argv == NULL) return false; + argv0 = argv[0]; + if (strstr (argv[0], "podman") == NULL) - { - free (argv[0]); - free (argv); - return false; - } + return false; for (argc = 0; argv[argc]; argc++) { @@ -222,18 +244,32 @@ can_use_shortcut () if (argv[argc+1] != NULL && (strcmp (argv[argc], "container") == 0 || strcmp (argv[argc], "image") == 0) && - strcmp (argv[argc+1], "mount") == 0) + (strcmp (argv[argc+1], "mount") == 0 || strcmp (argv[argc+1], "scp") == 0)) { ret = false; break; } } - free (argv[0]); - free (argv); return ret; } +static int +open_namespace (int pid_to_join, const char *ns_file) +{ + char ns_path[PATH_MAX]; + int ret; + + ret = snprintf (ns_path, PATH_MAX, "/proc/%d/ns/%s", pid_to_join, ns_file); + if (ret == PATH_MAX) + { + fprintf (stderr, "internal error: namespace path too long\n"); + return -1; + } + + return open (ns_path, O_CLOEXEC | O_RDONLY); +} + int is_fd_inherited(int fd) { @@ -250,8 +286,7 @@ static void __attribute__((constructor)) init() const char *listen_pid; const char *listen_fds; const char *listen_fdnames; - - DIR *d; + cleanup_dir DIR *d = NULL; pause = getenv ("_PODMAN_PAUSE"); if (pause && pause[0]) @@ -299,7 +334,6 @@ static void __attribute__((constructor)) init() FD_SET (fd % FD_SETSIZE, &(open_files_set[fd / FD_SETSIZE])); } - closedir (d); } listen_pid = getenv("LISTEN_PID"); @@ -317,7 +351,7 @@ static void __attribute__((constructor)) init() if (saved_systemd_listen_pid == NULL || saved_systemd_listen_fds == NULL) { - fprintf (stderr, "save socket listen environments error: %s\n", strerror (errno)); + fprintf (stderr, "save socket listen environments error: %m\n"); _exit (EXIT_FAILURE); } } @@ -327,73 +361,70 @@ static void __attribute__((constructor)) init() xdg_runtime_dir = getenv ("XDG_RUNTIME_DIR"); if (geteuid () != 0 && xdg_runtime_dir && xdg_runtime_dir[0] && can_use_shortcut ()) { - int r; - int fd; + cleanup_free char *cwd = NULL; + cleanup_close int userns_fd = -1; + cleanup_close int mntns_fd = -1; + 
cleanup_close int fd = -1; long pid; char buf[12]; uid_t uid; gid_t gid; char path[PATH_MAX]; const char *const suffix = "/libpod/tmp/pause.pid"; - char *cwd = getcwd (NULL, 0); char uid_fmt[16]; char gid_fmt[16]; size_t len; + int r; + cwd = getcwd (NULL, 0); if (cwd == NULL) { - fprintf (stderr, "error getting current working directory: %s\n", strerror (errno)); + fprintf (stderr, "error getting current working directory: %m\n"); _exit (EXIT_FAILURE); } len = snprintf (path, PATH_MAX, "%s%s", xdg_runtime_dir, suffix); if (len >= PATH_MAX) { - fprintf (stderr, "invalid value for XDG_RUNTIME_DIR: %s", strerror (ENAMETOOLONG)); + errno = ENAMETOOLONG; + fprintf (stderr, "invalid value for XDG_RUNTIME_DIR: %m"); exit (EXIT_FAILURE); } fd = open (path, O_RDONLY); if (fd < 0) - { - free (cwd); - return; - } + return; r = TEMP_FAILURE_RETRY (read (fd, buf, sizeof (buf) - 1)); - close (fd); + if (r < 0) - { - free (cwd); - return; - } + return; buf[r] = '\0'; pid = strtol (buf, NULL, 10); if (pid == LONG_MAX) - { - free (cwd); - return; - } + return; uid = geteuid (); gid = getegid (); - sprintf (path, "/proc/%ld/ns/user", pid); - fd = open (path, O_RDONLY); - if (fd < 0 || setns (fd, 0) < 0) - { - free (cwd); - return; - } - close (fd); + userns_fd = open_namespace (pid, "user"); + if (userns_fd < 0) + return; - /* Errors here cannot be ignored as we already joined a ns. */ - sprintf (path, "/proc/%ld/ns/mnt", pid); - fd = open (path, O_RDONLY); - if (fd < 0) + mntns_fd = open_namespace (pid, "mnt"); + if (mntns_fd < 0) + return; + + if (setns (userns_fd, 0) < 0) + return; + + /* The user namespace was joined, after this point errors are + not recoverable anymore. */ + + if (setns (mntns_fd, 0) < 0) { - fprintf (stderr, "cannot open %s: %s", path, strerror (errno)); + fprintf (stderr, "cannot join mount namespace for %ld: %m", pid); exit (EXIT_FAILURE); } @@ -404,33 +435,24 @@ static void __attribute__((constructor)) init() setenv ("_CONTAINERS_ROOTLESS_UID", uid_fmt, 1); setenv ("_CONTAINERS_ROOTLESS_GID", gid_fmt, 1); - r = setns (fd, 0); - if (r < 0) - { - fprintf (stderr, "cannot join mount namespace for %ld: %s", pid, strerror (errno)); - exit (EXIT_FAILURE); - } - close (fd); - if (syscall_setresgid (0, 0, 0) < 0) { - fprintf (stderr, "cannot setresgid: %s\n", strerror (errno)); + fprintf (stderr, "cannot setresgid: %m\n"); _exit (EXIT_FAILURE); } if (syscall_setresuid (0, 0, 0) < 0) { - fprintf (stderr, "cannot setresuid: %s\n", strerror (errno)); + fprintf (stderr, "cannot setresuid: %m\n"); _exit (EXIT_FAILURE); } if (chdir (cwd) < 0) { - fprintf (stderr, "cannot chdir to %s: %s\n", cwd, strerror (errno)); + fprintf (stderr, "cannot chdir to %s: %m\n", cwd); _exit (EXIT_FAILURE); } - free (cwd); rootless_uid_init = uid; rootless_gid_init = gid; } @@ -529,7 +551,7 @@ create_pause_process (const char *pause_pid_file_path, char **argv) fd = mkstemp (tmp_file_path); if (fd < 0) { - fprintf (stderr, "error creating temporary file: %s\n", strerror (errno)); + fprintf (stderr, "error creating temporary file: %m\n"); kill (pid, SIGKILL); _exit (EXIT_FAILURE); } @@ -537,7 +559,7 @@ create_pause_process (const char *pause_pid_file_path, char **argv) r = TEMP_FAILURE_RETRY (write (fd, pid_str, strlen (pid_str))); if (r < 0) { - fprintf (stderr, "cannot write to file descriptor: %s\n", strerror (errno)); + fprintf (stderr, "cannot write to file descriptor: %m\n"); kill (pid, SIGKILL); _exit (EXIT_FAILURE); } @@ -555,7 +577,7 @@ create_pause_process (const char *pause_pid_file_path, char **argv) r 
= TEMP_FAILURE_RETRY (write (p[1], "0", 1)); if (r < 0) { - fprintf (stderr, "cannot write to pipe: %s\n", strerror (errno)); + fprintf (stderr, "cannot write to pipe: %m\n"); _exit (EXIT_FAILURE); } close (p[1]); @@ -590,22 +612,6 @@ create_pause_process (const char *pause_pid_file_path, char **argv) } } -static int -open_namespace (int pid_to_join, const char *ns_file) -{ - char ns_path[PATH_MAX]; - int ret; - - ret = snprintf (ns_path, PATH_MAX, "/proc/%d/ns/%s", pid_to_join, ns_file); - if (ret == PATH_MAX) - { - fprintf (stderr, "internal error: namespace path too long\n"); - return -1; - } - - return open (ns_path, O_CLOEXEC | O_RDONLY); -} - static void join_namespace_or_die (const char *name, int ns_fd) { @@ -619,18 +625,20 @@ join_namespace_or_die (const char *name, int ns_fd) int reexec_userns_join (int pid_to_join, char *pause_pid_file_path) { + cleanup_close int userns_fd = -1; + cleanup_close int mntns_fd = -1; + cleanup_free char *cwd = NULL; char uid[16]; char gid[16]; - char **argv; + cleanup_free char *argv0 = NULL; + cleanup_free char **argv = NULL; int pid; - int mnt_ns = -1; - int user_ns = -1; - char *cwd = getcwd (NULL, 0); sigset_t sigset, oldsigset; + cwd = getcwd (NULL, 0); if (cwd == NULL) { - fprintf (stderr, "error getting current working directory: %s\n", strerror (errno)); + fprintf (stderr, "error getting current working directory: %m\n"); _exit (EXIT_FAILURE); } @@ -640,32 +648,27 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) argv = get_cmd_line_args (); if (argv == NULL) { - fprintf (stderr, "cannot read argv: %s\n", strerror (errno)); + fprintf (stderr, "cannot read argv: %m\n"); _exit (EXIT_FAILURE); } - user_ns = open_namespace (pid_to_join, "user"); - if (user_ns < 0) - return user_ns; - mnt_ns = open_namespace (pid_to_join, "mnt"); - if (mnt_ns < 0) - { - close (user_ns); - return mnt_ns; - } + argv0 = argv[0]; + + userns_fd = open_namespace (pid_to_join, "user"); + if (userns_fd < 0) + return userns_fd; + mntns_fd = open_namespace (pid_to_join, "mnt"); + if (mntns_fd < 0) + return mntns_fd; pid = fork (); if (pid < 0) - fprintf (stderr, "cannot fork: %s\n", strerror (errno)); + fprintf (stderr, "cannot fork: %m\n"); if (pid) { int f; - /* We passed down these fds, close them. 
*/ - close (user_ns); - close (mnt_ns); - for (f = 3; f <= open_files_max_fd; f++) if (is_fd_inherited (f)) close (f); @@ -681,22 +684,22 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) if (sigfillset (&sigset) < 0) { - fprintf (stderr, "cannot fill sigset: %s\n", strerror (errno)); + fprintf (stderr, "cannot fill sigset: %m\n"); _exit (EXIT_FAILURE); } if (sigdelset (&sigset, SIGCHLD) < 0) { - fprintf (stderr, "cannot sigdelset(SIGCHLD): %s\n", strerror (errno)); + fprintf (stderr, "cannot sigdelset(SIGCHLD): %m\n"); _exit (EXIT_FAILURE); } if (sigdelset (&sigset, SIGTERM) < 0) { - fprintf (stderr, "cannot sigdelset(SIGTERM): %s\n", strerror (errno)); + fprintf (stderr, "cannot sigdelset(SIGTERM): %m\n"); _exit (EXIT_FAILURE); } if (sigprocmask (SIG_BLOCK, &sigset, &oldsigset) < 0) { - fprintf (stderr, "cannot block signals: %s\n", strerror (errno)); + fprintf (stderr, "cannot block signals: %m\n"); _exit (EXIT_FAILURE); } @@ -717,33 +720,30 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) if (prctl (PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0) < 0) { - fprintf (stderr, "cannot prctl(PR_SET_PDEATHSIG): %s\n", strerror (errno)); + fprintf (stderr, "cannot prctl(PR_SET_PDEATHSIG): %m\n"); _exit (EXIT_FAILURE); } - join_namespace_or_die ("user", user_ns); - join_namespace_or_die ("mnt", mnt_ns); - close (user_ns); - close (mnt_ns); + join_namespace_or_die ("user", userns_fd); + join_namespace_or_die ("mnt", mntns_fd); if (syscall_setresgid (0, 0, 0) < 0) { - fprintf (stderr, "cannot setresgid: %s\n", strerror (errno)); + fprintf (stderr, "cannot setresgid: %m\n"); _exit (EXIT_FAILURE); } if (syscall_setresuid (0, 0, 0) < 0) { - fprintf (stderr, "cannot setresuid: %s\n", strerror (errno)); + fprintf (stderr, "cannot setresuid: %m\n"); _exit (EXIT_FAILURE); } if (chdir (cwd) < 0) { - fprintf (stderr, "cannot chdir to %s: %s\n", cwd, strerror (errno)); + fprintf (stderr, "cannot chdir to %s: %m\n", cwd); _exit (EXIT_FAILURE); } - free (cwd); if (pause_pid_file_path && pause_pid_file_path[0] != '\0') { @@ -752,7 +752,7 @@ reexec_userns_join (int pid_to_join, char *pause_pid_file_path) } if (sigprocmask (SIG_SETMASK, &oldsigset, NULL) < 0) { - fprintf (stderr, "cannot block signals: %s\n", strerror (errno)); + fprintf (stderr, "cannot block signals: %m\n"); _exit (EXIT_FAILURE); } @@ -784,7 +784,7 @@ static int copy_file_to_fd (const char *file_to_read, int outfd) { char buf[512]; - int fd; + cleanup_close int fd = -1; fd = open (file_to_read, O_RDONLY); if (fd < 0) @@ -796,10 +796,7 @@ copy_file_to_fd (const char *file_to_read, int outfd) r = TEMP_FAILURE_RETRY (read (fd, buf, sizeof buf)); if (r < 0) - { - close (fd); - return r; - } + return r; if (r == 0) break; @@ -808,43 +805,40 @@ copy_file_to_fd (const char *file_to_read, int outfd) { w = TEMP_FAILURE_RETRY (write (outfd, &buf[t], r - t)); if (w < 0) - { - close (fd); - return w; - } + return w; t += w; } } - close (fd); return 0; } int reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_read, int outputfd) { + cleanup_free char **argv = NULL; + cleanup_free char *argv0 = NULL; + cleanup_free char *cwd = NULL; + sigset_t sigset, oldsigset; int ret; pid_t pid; char b; - char **argv; char uid[16]; char gid[16]; - char *cwd = getcwd (NULL, 0); - sigset_t sigset, oldsigset; + cwd = getcwd (NULL, 0); if (cwd == NULL) { - fprintf (stderr, "error getting current working directory: %s\n", strerror (errno)); + fprintf (stderr, "error getting current working directory: %m\n"); _exit 
(EXIT_FAILURE); } - sprintf (uid, "%d", geteuid ()); sprintf (gid, "%d", getegid ()); pid = syscall_clone (CLONE_NEWUSER|CLONE_NEWNS|SIGCHLD, NULL); if (pid < 0) { - fprintf (stderr, "cannot clone: %s\n", strerror (errno)); + fprintf (stderr, "cannot clone: %m\n"); check_proc_sys_userns_file (_max_user_namespaces); check_proc_sys_userns_file (_unprivileged_user_namespaces); } @@ -872,32 +866,34 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re if (sigfillset (&sigset) < 0) { - fprintf (stderr, "cannot fill sigset: %s\n", strerror (errno)); + fprintf (stderr, "cannot fill sigset: %m\n"); _exit (EXIT_FAILURE); } if (sigdelset (&sigset, SIGCHLD) < 0) { - fprintf (stderr, "cannot sigdelset(SIGCHLD): %s\n", strerror (errno)); + fprintf (stderr, "cannot sigdelset(SIGCHLD): %m\n"); _exit (EXIT_FAILURE); } if (sigdelset (&sigset, SIGTERM) < 0) { - fprintf (stderr, "cannot sigdelset(SIGTERM): %s\n", strerror (errno)); + fprintf (stderr, "cannot sigdelset(SIGTERM): %m\n"); _exit (EXIT_FAILURE); } if (sigprocmask (SIG_BLOCK, &sigset, &oldsigset) < 0) { - fprintf (stderr, "cannot block signals: %s\n", strerror (errno)); + fprintf (stderr, "cannot block signals: %m\n"); _exit (EXIT_FAILURE); } argv = get_cmd_line_args (); if (argv == NULL) { - fprintf (stderr, "cannot read argv: %s\n", strerror (errno)); + fprintf (stderr, "cannot read argv: %m\n"); _exit (EXIT_FAILURE); } + argv0 = argv[0]; + if (do_socket_activation) { char s[32]; @@ -916,7 +912,7 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re ret = TEMP_FAILURE_RETRY (read (ready, &b, 1)); if (ret < 0) { - fprintf (stderr, "cannot read from sync pipe: %s\n", strerror (errno)); + fprintf (stderr, "cannot read from sync pipe: %m\n"); _exit (EXIT_FAILURE); } if (ret != 1 || b != '0') @@ -924,25 +920,24 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re if (syscall_setresgid (0, 0, 0) < 0) { - fprintf (stderr, "cannot setresgid: %s\n", strerror (errno)); + fprintf (stderr, "cannot setresgid: %m\n"); TEMP_FAILURE_RETRY (write (ready, "1", 1)); _exit (EXIT_FAILURE); } if (syscall_setresuid (0, 0, 0) < 0) { - fprintf (stderr, "cannot setresuid: %s\n", strerror (errno)); + fprintf (stderr, "cannot setresuid: %m\n"); TEMP_FAILURE_RETRY (write (ready, "1", 1)); _exit (EXIT_FAILURE); } if (chdir (cwd) < 0) { - fprintf (stderr, "cannot chdir to %s: %s\n", cwd, strerror (errno)); + fprintf (stderr, "cannot chdir to %s: %m\n", cwd); TEMP_FAILURE_RETRY (write (ready, "1", 1)); _exit (EXIT_FAILURE); } - free (cwd); if (pause_pid_file_path && pause_pid_file_path[0] != '\0') { @@ -956,14 +951,14 @@ reexec_in_user_namespace (int ready, char *pause_pid_file_path, char *file_to_re ret = TEMP_FAILURE_RETRY (write (ready, "0", 1)); if (ret < 0) { - fprintf (stderr, "cannot write to ready pipe: %s\n", strerror (errno)); - _exit (EXIT_FAILURE); + fprintf (stderr, "cannot write to ready pipe: %m\n"); + _exit (EXIT_FAILURE); } close (ready); if (sigprocmask (SIG_SETMASK, &oldsigset, NULL) < 0) { - fprintf (stderr, "cannot block signals: %s\n", strerror (errno)); + fprintf (stderr, "cannot block signals: %m\n"); _exit (EXIT_FAILURE); } diff --git a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless_linux.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go similarity index 93% rename from vendor/github.com/containers/podman/v3/pkg/rootless/rootless_linux.go rename to vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go index 
7f922866646..5af9a978b0e 100644 --- a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package rootless @@ -18,17 +19,18 @@ import ( "sync" "unsafe" - "github.com/containers/podman/v3/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/errorhandling" "github.com/containers/storage/pkg/idtools" pmount "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" "golang.org/x/sys/unix" ) /* -#cgo remoteclient CFLAGS: -Wall -Werror -DDISABLE_JOIN_SHORTCUT +#cgo remote CFLAGS: -Wall -Werror -DDISABLE_JOIN_SHORTCUT #include #include extern uid_t rootless_uid(); @@ -113,8 +115,14 @@ func GetRootlessGID() int { func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) error { var tool = "newuidmap" + mode := os.ModeSetuid + cap := capability.CAP_SETUID + idtype := "setuid" if !uid { tool = "newgidmap" + mode = os.ModeSetgid + cap = capability.CAP_SETGID + idtype = "setgid" } path, err := exec.LookPath(tool) if err != nil { @@ -145,8 +153,14 @@ func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) err } if output, err := cmd.CombinedOutput(); err != nil { - logrus.Debugf("error from %s: %s", tool, output) - return errors.Wrapf(err, "cannot setup namespace using %s", tool) + logrus.Errorf("running `%s`: %s", strings.Join(args, " "), output) + errorStr := fmt.Sprintf("cannot setup namespace using %q", path) + if isSet, err := unshare.IsSetID(cmd.Path, mode, cap); err != nil { + logrus.Errorf("Failed to check for %s on %s: %v", idtype, path, err) + } else if !isSet { + errorStr = fmt.Sprintf("%s: should have %s or have filecaps %s", errorStr, idtype, idtype) + } + return errors.Wrapf(err, errorStr) } return nil } @@ -173,7 +187,7 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) { ret := C.reexec_in_user_namespace_wait(pidC, 0) if ret < 0 { - return false, -1, errors.New("error waiting for the re-exec process") + return false, -1, errors.New("waiting for the re-exec process") } return true, int(ret), nil @@ -325,7 +339,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo uidsMapped = err == nil } if !uidsMapped { - logrus.Warnf("Using rootless single mapping into the namespace. This might break some images. Check /etc/subuid and /etc/subgid for adding sub*ids") + logrus.Warnf("Using rootless single mapping into the namespace. This might break some images. 
Check /etc/subuid and /etc/subgid for adding sub*ids") + logrus.Warnf("Using rootless single mapping into the namespace. This might break some images. Check /etc/subuid and /etc/subgid for adding sub*ids if not using a network user") setgroups := fmt.Sprintf("/proc/%d/setgroups", pid) err = ioutil.WriteFile(setgroups, []byte("deny\n"), 0666) if err != nil { @@ -373,7 +387,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo if fileOutput != nil { ret := C.reexec_in_user_namespace_wait(pidC, 0) if ret < 0 { - return false, -1, errors.New("error waiting for the re-exec process") + return false, -1, errors.New("waiting for the re-exec process") } return true, 0, nil @@ -390,11 +404,11 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo return joinUserAndMountNS(uint(pid), "") } } - return false, -1, errors.Wrapf(err, "error setting up the process") + return false, -1, errors.New("setting up the process") } if b[0] != '0' { - return false, -1, errors.Wrapf(err, "error setting up the process") + return false, -1, errors.New("setting up the process") } signals := []os.Signal{} @@ -424,7 +438,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ boo ret := C.reexec_in_user_namespace_wait(pidC, 0) if ret < 0 { - return false, -1, errors.New("error waiting for the re-exec process") + return false, -1, errors.New("waiting for the re-exec process") } return true, int(ret), nil diff --git a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go similarity index 98% rename from vendor/github.com/containers/podman/v3/pkg/rootless/rootless_unsupported.go rename to vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go index 7dfb4a4b222..fe164e2350c 100644 --- a/vendor/github.com/containers/podman/v3/pkg/rootless/rootless_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux || !cgo // +build !linux !cgo package rootless diff --git a/vendor/github.com/containers/podman/v4/pkg/rootlessport/rootlessport_linux.go b/vendor/github.com/containers/podman/v4/pkg/rootlessport/rootlessport_linux.go new file mode 100644 index 00000000000..6918a7e8c2b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/rootlessport/rootlessport_linux.go @@ -0,0 +1,35 @@ +//go:build linux +// +build linux + +// Package rootlessport provides reexec for the RootlessKit-based port forwarder. +// +// init() contains reexec.Register() for ReexecKey. +// +// The reexec requires Config to be provided via stdin. +// +// The reexec writes a human-readable error message on stdout on error. +// +// Debug log is printed on stderr. +package rootlessport + +import ( + "github.com/containers/common/libnetwork/types" +) + +const ( + // BinaryName is the binary name for the parent process. + BinaryName = "rootlessport" +) + +// Config needs to be provided to the process via stdin as a JSON string. +// stdin needs to be closed after the message has been written.
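As a side note on the stdin contract spelled out above (JSON over stdin, stdin closed after the write), a hypothetical parent-side sketch using the Config type defined just below would look roughly like this. The child binary name and every field value here are placeholders, since podman actually reexecs itself via the reexec package:

package main

import (
	"encoding/json"
	"log"
	"os/exec"

	"github.com/containers/common/libnetwork/types"
	"github.com/containers/podman/v4/pkg/rootlessport"
)

func main() {
	cfg := rootlessport.Config{
		Mappings:    []types.PortMapping{{HostPort: 8080, ContainerPort: 80, Protocol: "tcp"}},
		NetNSPath:   "/run/user/1000/netns/example", // placeholder path
		ExitFD:      3,                              // placeholder fd numbers
		ReadyFD:     4,
		TmpDir:      "/tmp/rootlessport-example",
		ChildIP:     "10.0.2.100",
		ContainerID: "example",
	}
	cmd := exec.Command("rootlessport-child") // hypothetical child; podman reexecs itself
	stdin, err := cmd.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	// Hand over the config as JSON, then close stdin so the child sees EOF.
	if err := json.NewEncoder(stdin).Encode(&cfg); err != nil {
		log.Fatal(err)
	}
	stdin.Close()
	_ = cmd.Wait()
}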
+type Config struct { + Mappings []types.PortMapping + NetNSPath string + ExitFD int + ReadyFD int + TmpDir string + ChildIP string + ContainerID string + RootlessCNI bool +} diff --git a/vendor/github.com/containers/podman/v3/pkg/seccomp/seccomp.go b/vendor/github.com/containers/podman/v4/pkg/seccomp/seccomp.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/seccomp/seccomp.go rename to vendor/github.com/containers/podman/v4/pkg/seccomp/seccomp.go diff --git a/vendor/github.com/containers/podman/v3/pkg/selinux/selinux.go b/vendor/github.com/containers/podman/v4/pkg/selinux/selinux.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/selinux/selinux.go rename to vendor/github.com/containers/podman/v4/pkg/selinux/selinux.go diff --git a/vendor/github.com/containers/podman/v3/pkg/servicereaper/service.go b/vendor/github.com/containers/podman/v4/pkg/servicereaper/service.go similarity index 97% rename from vendor/github.com/containers/podman/v3/pkg/servicereaper/service.go rename to vendor/github.com/containers/podman/v4/pkg/servicereaper/service.go index e105148f071..61445bc197b 100644 --- a/vendor/github.com/containers/podman/v3/pkg/servicereaper/service.go +++ b/vendor/github.com/containers/podman/v4/pkg/servicereaper/service.go @@ -1,4 +1,5 @@ -//+build linux +//go:build linux +// +build linux package servicereaper diff --git a/vendor/github.com/containers/podman/v3/pkg/signal/signal_common.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/signal/signal_common.go rename to vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go index 8ff4b4dbf0f..5ea67843a5d 100644 --- a/vendor/github.com/containers/podman/v3/pkg/signal/signal_common.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go @@ -25,7 +25,7 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) { } // ParseSignalNameOrNumber translates a string to a valid syscall signal. Input -// can be a name or number representation i.e. "KILL" "9" +// can be a name or number representation i.e. "KILL" "9". func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) { basename := strings.TrimPrefix(rawSignal, "-") s, err := ParseSignal(basename) diff --git a/vendor/github.com/containers/podman/v3/pkg/signal/signal_linux.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v3/pkg/signal/signal_linux.go rename to vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go index 305b9d21f41..21e09c9fef0 100644 --- a/vendor/github.com/containers/podman/v3/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go @@ -1,5 +1,5 @@ -// +build linux -// +build !mips,!mipsle,!mips64,!mips64le +//go:build linux && !mips && !mipsle && !mips64 && !mips64le +// +build linux,!mips,!mipsle,!mips64,!mips64le // Signal handling for Linux only.
package signal diff --git a/vendor/github.com/containers/podman/v3/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go similarity index 97% rename from vendor/github.com/containers/podman/v3/pkg/signal/signal_linux_mipsx.go rename to vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go index 45c9d5af1e6..52b07aaf463 100644 --- a/vendor/github.com/containers/podman/v3/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go @@ -1,3 +1,4 @@ +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go new file mode 100644 index 00000000000..c0aa62d2107 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go @@ -0,0 +1,100 @@ +//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd netbsd openbsd solaris zos + +// Signal handling for Linux only. +package signal + +import ( + "os" + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 + + SIGWINCH = syscall.SIGWINCH +) + +// signalMap is a map of Linux signals. +// These constants are sourced from the Linux version of golang.org/x/sys/unix +// (I don't see much risk of this changing). +// This should work as long as Podman only runs containers on Linux, which seems +// a safe assumption for now. +var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.Signal(0x6), + "ALRM": syscall.Signal(0xe), + "BUS": syscall.Signal(0x7), + "CHLD": syscall.Signal(0x11), + "CLD": syscall.Signal(0x11), + "CONT": syscall.Signal(0x12), + "FPE": syscall.Signal(0x8), + "HUP": syscall.Signal(0x1), + "ILL": syscall.Signal(0x4), + "INT": syscall.Signal(0x2), + "IO": syscall.Signal(0x1d), + "IOT": syscall.Signal(0x6), + "KILL": syscall.Signal(0x9), + "PIPE": syscall.Signal(0xd), + "POLL": syscall.Signal(0x1d), + "PROF": syscall.Signal(0x1b), + "PWR": syscall.Signal(0x1e), + "QUIT": syscall.Signal(0x3), + "SEGV": syscall.Signal(0xb), + "STKFLT": syscall.Signal(0x10), + "STOP": syscall.Signal(0x13), + "SYS": syscall.Signal(0x1f), + "TERM": syscall.Signal(0xf), + "TRAP": syscall.Signal(0x5), + "TSTP": syscall.Signal(0x14), + "TTIN": syscall.Signal(0x15), + "TTOU": syscall.Signal(0x16), + "URG": syscall.Signal(0x17), + "USR1": syscall.Signal(0xa), + "USR2": syscall.Signal(0xc), + "VTALRM": syscall.Signal(0x1a), + "WINCH": syscall.Signal(0x1c), + "XCPU": syscall.Signal(0x18), + "XFSZ": syscall.Signal(0x19), + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} + +// 
CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + panic("Unsupported on non-linux platforms") +} diff --git a/vendor/github.com/containers/podman/v3/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go similarity index 93% rename from vendor/github.com/containers/podman/v3/pkg/signal/signal_unsupported.go rename to vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go index 9d1733c02d5..d8bba7c905c 100644 --- a/vendor/github.com/containers/podman/v3/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos // Signal handling for Linux only. package signal diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/config_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go similarity index 95% rename from vendor/github.com/containers/podman/v3/pkg/specgen/config_unsupported.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go index 70a60ac47c0..a6bf77277ce 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/config_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/config_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package specgen diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/container_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go similarity index 84% rename from vendor/github.com/containers/podman/v3/pkg/specgen/container_validate.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go index caea51ea84f..355fbc3681a 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/container_validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go @@ -4,9 +4,9 @@ import ( "strconv" "strings" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) @@ -29,30 +29,22 @@ func exclusiveOptions(opt1, opt2 string) error { // Validate verifies that the given SpecGenerator is valid and satisfies required // input for creating a container. func (s *SpecGenerator) Validate() error { - if rootless.IsRootless() && len(s.CNINetworks) == 0 { - if s.StaticIP != nil || s.StaticIPv6 != nil { - return ErrNoStaticIPRootless - } - if s.StaticMAC != nil { - return ErrNoStaticMACRootless - } - } - // Containers being added to a pod cannot have certain network attributes // associated with them because those should be on the infra container. 
if len(s.Pod) > 0 && s.NetNS.NSMode == FromPod { - if s.StaticIP != nil || s.StaticIPv6 != nil { - return errors.Wrap(define.ErrNetworkOnPodContainer, "static ip addresses must be defined when the pod is created") - } - if s.StaticMAC != nil { - return errors.Wrap(define.ErrNetworkOnPodContainer, "MAC addresses must be defined when the pod is created") - } - if len(s.CNINetworks) > 0 { + if len(s.Networks) > 0 { return errors.Wrap(define.ErrNetworkOnPodContainer, "networks must be defined when the pod is created") } if len(s.PortMappings) > 0 || s.PublishExposedPorts { return errors.Wrap(define.ErrNetworkOnPodContainer, "published or exposed ports must be defined when the pod is created") } + if len(s.HostAdd) > 0 { + return errors.Wrap(define.ErrNetworkOnPodContainer, "extra host entries must be specified on the pod") + } + } + + if s.NetNS.IsContainer() && len(s.HostAdd) > 0 { + return errors.Wrap(ErrInvalidSpecConfig, "cannot set extra host entries when the container is joined to another container's network namespace") + } // @@ -91,17 +83,13 @@ func (s *SpecGenerator) Validate() error { s.ContainerStorageConfig.ImageVolumeMode, strings.Join(ImageVolumeModeValues, ",")) } // shmsize conflicts with IPC namespace - if s.ContainerStorageConfig.ShmSize != nil && !s.ContainerStorageConfig.IpcNS.IsPrivate() { - return errors.New("cannot set shmsize when running in the host IPC Namespace") + if s.ContainerStorageConfig.ShmSize != nil && (s.ContainerStorageConfig.IpcNS.IsHost() || s.ContainerStorageConfig.IpcNS.IsNone()) { + return errors.Errorf("cannot set shmsize when running in the %s IPC Namespace", s.ContainerStorageConfig.IpcNS) } // // ContainerSecurityConfig // - // capadd and privileged are exclusive - if len(s.CapAdd) > 0 && s.Privileged { - return exclusiveOptions("CapAdd", "privileged") - } // userns and idmappings conflict if s.UserNS.IsPrivate() && s.IDMappings == nil { return errors.Wrap(ErrInvalidSpecConfig, "IDMappings are required when not creating a User namespace") @@ -134,19 +122,19 @@ func (s *SpecGenerator) Validate() error { } // TODO the specgen does not appear to handle this?
Should it - //switch config.Cgroup.Cgroups { - //case "disabled": + // switch config.Cgroup.Cgroups { + // case "disabled": // if addedResources { // return errors.New("cannot specify resource limits when cgroups are disabled is specified") // } // configSpec.Linux.Resources = &spec.LinuxResources{} - //case "enabled", "no-conmon", "": + // case "enabled", "no-conmon", "": // // Do nothing - //default: + // default: // return errors.New("unrecognized option for cgroups; supported are 'default', 'disabled', 'no-conmon'") - //} + // } invalidUlimitFormatError := errors.New("invalid default ulimit definition must be form of type=soft:hard") - //set ulimits if not rootless + // set ulimits if not rootless if len(s.ContainerResourceConfig.Rlimits) < 1 && !rootless.IsRootless() { // Containers common defines this as something like nproc=4194304:4194304 tmpnproc := containerConfig.Ulimits() @@ -181,7 +169,7 @@ func (s *SpecGenerator) Validate() error { if err := s.UtsNS.validate(); err != nil { return err } - if err := s.IpcNS.validate(); err != nil { + if err := validateIPCNS(&s.IpcNS); err != nil { return err } if err := s.PidNS.validate(); err != nil { @@ -204,5 +192,10 @@ func (s *SpecGenerator) Validate() error { if err := validateNetNS(&s.NetNS); err != nil { return err } + if s.NetNS.NSMode != Bridge && len(s.Networks) > 0 { + // Note that we also get the ip and mac in the networks map + return errors.New("Networks and static ip/mac address can only be used with Bridge mode networking") + } + return nil } diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go index 2d1e2b288a6..8f83fc09b15 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go @@ -2,14 +2,15 @@ package generate import ( "fmt" + "io/fs" "io/ioutil" "os" "path" "path/filepath" "strings" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" @@ -47,17 +48,6 @@ func addPrivilegedDevices(g *generate.Generator) error { if _, found := mounts[d.Path]; found { continue } - st, err := os.Stat(d.Path) - if err != nil { - if err == unix.EPERM { - continue - } - return err - } - // Skip devices that the user has not access to. - if st.Mode()&0007 == 0 { - continue - } newMounts = append(newMounts, devMnt) } g.Config.Mounts = append(newMounts, g.Config.Mounts...) 
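For reference, the default-ulimit handling in the Validate() hunk above expects containers.conf entries shaped like nproc=4194304:4194304. A minimal standalone sketch of that type=soft:hard format (illustrative parsing only, not the vendored implementation):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseUlimit splits "type=soft:hard" into its three parts.
func parseUlimit(def string) (string, uint64, uint64, error) {
	name, limits, ok := strings.Cut(def, "=")
	if !ok {
		return "", 0, 0, fmt.Errorf("invalid ulimit %q: must be of the form type=soft:hard", def)
	}
	softStr, hardStr, ok := strings.Cut(limits, ":")
	if !ok {
		return "", 0, 0, fmt.Errorf("invalid ulimit %q: must be of the form type=soft:hard", def)
	}
	soft, err := strconv.ParseUint(softStr, 10, 64)
	if err != nil {
		return "", 0, 0, err
	}
	hard, err := strconv.ParseUint(hardStr, 10, 64)
	if err != nil {
		return "", 0, 0, err
	}
	return name, soft, hard, nil
}

func main() {
	name, soft, hard, err := parseUlimit("nproc=4194304:4194304")
	fmt.Println(name, soft, hard, err) // nproc 4194304 4194304 <nil>
}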
@@ -112,8 +102,8 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error { } // mount the internal devices recursively - if err := filepath.Walk(resolvedDevicePath, func(dpath string, f os.FileInfo, e error) error { - if f.Mode()&os.ModeDevice == os.ModeDevice { + if err := filepath.WalkDir(resolvedDevicePath, func(dpath string, d fs.DirEntry, e error) error { + if d.Type()&os.ModeDevice == os.ModeDevice { found = true device := fmt.Sprintf("%s:%s", dpath, filepath.Join(dest, strings.TrimPrefix(dpath, src))) if devmode != "" { @@ -273,8 +263,8 @@ func addDevice(g *generate.Generator, device string) error { // ParseDevice parses device mapping string to a src, dest & permissions string func ParseDevice(device string) (string, string, string, error) { //nolint - src := "" - dst := "" + var src string + var dst string permissions := "rwm" arr := strings.Split(device, ":") switch len(arr) { diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux_cgo.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_cgo.go similarity index 94% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux_cgo.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_cgo.go index 6ffbf69c10c..efab6679a90 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux_cgo.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_cgo.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package generate @@ -8,8 +9,8 @@ import ( "github.com/containers/common/libimage" goSeccomp "github.com/containers/common/pkg/seccomp" - "github.com/containers/podman/v3/pkg/seccomp" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/pkg/seccomp" + "github.com/containers/podman/v4/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux_nocgo.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_nocgo.go similarity index 81% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux_nocgo.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_nocgo.go index 4a1880b745b..99b0c4eb241 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/config_linux_nocgo.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_nocgo.go @@ -1,3 +1,4 @@ +//go:build linux && !cgo // +build linux,!cgo package generate @@ -6,7 +7,7 @@ import ( "errors" "github.com/containers/common/libimage" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/container.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go similarity index 55% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/container.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go index f126aa0180e..63caaa77c9f 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/container.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go @@ -2,16 +2,19 @@ package generate import ( "context" + "encoding/json" "os" "strings" + "time" 
"github.com/containers/common/libimage" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/libpod/define" - ann "github.com/containers/podman/v3/pkg/annotations" - envLib "github.com/containers/podman/v3/pkg/env" - "github.com/containers/podman/v3/pkg/signal" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + ann "github.com/containers/podman/v4/pkg/annotations" + envLib "github.com/containers/podman/v4/pkg/env" + "github.com/containers/podman/v4/pkg/signal" + "github.com/containers/podman/v4/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -26,7 +29,7 @@ func getImageFromSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGen // Image may already have been set in the generator. image, resolvedName := s.GetImage() if image != nil { - inspectData, err := image.Inspect(ctx, false) + inspectData, err := image.Inspect(ctx, nil) if err != nil { return nil, "", nil, err } @@ -39,7 +42,7 @@ func getImageFromSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGen return nil, "", nil, err } s.SetImage(image, resolvedName) - inspectData, err := image.Inspect(ctx, false) + inspectData, err := image.Inspect(ctx, nil) if err != nil { return nil, "", nil, err } @@ -55,7 +58,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat return nil, err } if inspectData != nil { - inspectData, err = newImage.Inspect(ctx, false) + inspectData, err = newImage.Inspect(ctx, nil) if err != nil { return nil, err } @@ -64,6 +67,22 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat // NOTE: the health check is only set for Docker images // but inspect will take care of it. s.HealthConfig = inspectData.HealthCheck + if s.HealthConfig != nil { + if s.HealthConfig.Timeout == 0 { + hct, err := time.ParseDuration(define.DefaultHealthCheckTimeout) + if err != nil { + return nil, err + } + s.HealthConfig.Timeout = hct + } + if s.HealthConfig.Interval == 0 { + hct, err := time.ParseDuration(define.DefaultHealthCheckInterval) + if err != nil { + return nil, err + } + s.HealthConfig.Interval = hct + } + } } // Image stop signal @@ -88,9 +107,6 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat if err != nil { return nil, errors.Wrap(err, "error parsing fields in containers.conf") } - if defaultEnvs["container"] == "" { - defaultEnvs["container"] = "podman" - } var envs map[string]string // Image Environment defaults @@ -101,9 +117,16 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat if err != nil { return nil, errors.Wrap(err, "Env fields from image failed to parse") } - defaultEnvs = envLib.Join(defaultEnvs, envs) + defaultEnvs = envLib.Join(envLib.DefaultEnvVariables(), envLib.Join(defaultEnvs, envs)) } + for _, e := range s.UnsetEnv { + delete(defaultEnvs, e) + } + + if s.UnsetEnvAll { + defaultEnvs = make(map[string]string) + } // First transform the os env into a map. We need it for the labels later in // any case. 
osEnv, err := envLib.ParseSlice(os.Environ()) @@ -114,16 +137,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat if s.EnvHost { defaultEnvs = envLib.Join(defaultEnvs, osEnv) } else if s.HTTPProxy { - for _, envSpec := range []string{ - "http_proxy", - "HTTP_PROXY", - "https_proxy", - "HTTPS_PROXY", - "ftp_proxy", - "FTP_PROXY", - "no_proxy", - "NO_PROXY", - } { + for _, envSpec := range config.ProxyEnv { if v, ok := osEnv[envSpec]; ok { defaultEnvs[envSpec] = v } @@ -152,7 +166,9 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat // Add annotations from the image for k, v := range inspectData.Annotations { - annotations[k] = v + if !define.IsReservedAnnotation(k) { + annotations[k] = v + } } } @@ -223,6 +239,10 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat } } + if s.CgroupsMode == "" { + s.CgroupsMode = rtc.Cgroups() + } + // If caller did not specify Pids Limits load default if s.ResourceLimits == nil || s.ResourceLimits.Pids == nil { if s.CgroupsMode != "disabled" { @@ -325,3 +345,184 @@ func FinishThrottleDevices(s *specgen.SpecGenerator) error { } return nil } + +// ConfigToSpec takes a completed container config and converts it back into a specgenerator for purposes of cloning an existing container +func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, containerID string) (*libpod.Container, *libpod.InfraInherit, error) { + c, err := rt.LookupContainer(containerID) + if err != nil { + return nil, nil, err + } + conf := c.Config() + + tmpSystemd := conf.Systemd + tmpMounts := conf.Mounts + + conf.Systemd = nil + conf.Mounts = []string{} + + if specg == nil { + specg = &specgen.SpecGenerator{} + } + + specg.Pod = conf.Pod + + matching, err := json.Marshal(conf) + if err != nil { + return nil, nil, err + } + + err = json.Unmarshal(matching, specg) + if err != nil { + return nil, nil, err + } + + conf.Systemd = tmpSystemd + conf.Mounts = tmpMounts + + if conf.Spec != nil && conf.Spec.Linux != nil && conf.Spec.Linux.Resources != nil { + if specg.ResourceLimits == nil { + specg.ResourceLimits = conf.Spec.Linux.Resources + } + } + + nameSpaces := []string{"pid", "net", "cgroup", "ipc", "uts", "user"} + containers := []string{conf.PIDNsCtr, conf.NetNsCtr, conf.CgroupNsCtr, conf.IPCNsCtr, conf.UTSNsCtr, conf.UserNsCtr} + place := []*specgen.Namespace{&specg.PidNS, &specg.NetNS, &specg.CgroupNS, &specg.IpcNS, &specg.UtsNS, &specg.UserNS} + for i, ns := range containers { + if len(ns) > 0 { + ns := specgen.Namespace{NSMode: specgen.FromContainer, Value: ns} + place[i] = &ns + } else { + switch nameSpaces[i] { + case "pid": + specg.PidNS = specgen.Namespace{NSMode: specgen.Default} // default + case "net": + switch { + case conf.NetMode.IsBridge(): + toExpose := make(map[uint16]string, len(conf.ExposedPorts)) + for _, expose := range []map[uint16][]string{conf.ExposedPorts} { + for port, proto := range expose { + toExpose[port] = strings.Join(proto, ",") + } + } + specg.Expose = toExpose + specg.PortMappings = conf.PortMappings + specg.NetNS = specgen.Namespace{NSMode: specgen.Bridge} + case conf.NetMode.IsSlirp4netns(): + toExpose := make(map[uint16]string, len(conf.ExposedPorts)) + for _, expose := range []map[uint16][]string{conf.ExposedPorts} { + for port, proto := range expose { + toExpose[port] = strings.Join(proto, ",") + } + } + specg.Expose = toExpose + specg.PortMappings = conf.PortMappings + netMode := strings.Split(string(conf.NetMode), ":") + var val string + if
len(netMode) > 1 { + val = netMode[1] + } + specg.NetNS = specgen.Namespace{NSMode: specgen.Slirp, Value: val} + case conf.NetMode.IsPrivate(): + specg.NetNS = specgen.Namespace{NSMode: specgen.Private} + case conf.NetMode.IsDefault(): + specg.NetNS = specgen.Namespace{NSMode: specgen.Default} + case conf.NetMode.IsUserDefined(): + specg.NetNS = specgen.Namespace{NSMode: specgen.Path, Value: strings.Split(string(conf.NetMode), ":")[1]} + case conf.NetMode.IsContainer(): + specg.NetNS = specgen.Namespace{NSMode: specgen.FromContainer, Value: strings.Split(string(conf.NetMode), ":")[1]} + case conf.NetMode.IsPod(): + specg.NetNS = specgen.Namespace{NSMode: specgen.FromPod, Value: strings.Split(string(conf.NetMode), ":")[1]} + } + case "cgroup": + specg.CgroupNS = specgen.Namespace{NSMode: specgen.Default} // default + case "ipc": + switch conf.ShmDir { + case "/dev/shm": + specg.IpcNS = specgen.Namespace{NSMode: specgen.Host} + case "": + specg.IpcNS = specgen.Namespace{NSMode: specgen.None} + default: + specg.IpcNS = specgen.Namespace{NSMode: specgen.Default} // default + } + case "uts": + specg.UtsNS = specgen.Namespace{NSMode: specgen.Default} // default + case "user": + if conf.AddCurrentUserPasswdEntry { + specg.UserNS = specgen.Namespace{NSMode: specgen.KeepID} + } else { + specg.UserNS = specgen.Namespace{NSMode: specgen.Default} // default + } + } + } + } + + specg.IDMappings = &conf.IDMappings + specg.ContainerCreateCommand = conf.CreateCommand + if len(specg.Rootfs) == 0 { + specg.Rootfs = conf.Rootfs + } + if len(specg.Image) == 0 { + specg.Image = conf.RootfsImageID + } + var named []*specgen.NamedVolume + if len(conf.NamedVolumes) != 0 { + for _, v := range conf.NamedVolumes { + named = append(named, &specgen.NamedVolume{ + Name: v.Name, + Dest: v.Dest, + Options: v.Options, + }) + } + } + specg.Volumes = named + var image []*specgen.ImageVolume + if len(conf.ImageVolumes) != 0 { + for _, v := range conf.ImageVolumes { + image = append(image, &specgen.ImageVolume{ + Source: v.Source, + Destination: v.Dest, + ReadWrite: v.ReadWrite, + }) + } + } + specg.ImageVolumes = image + var overlay []*specgen.OverlayVolume + if len(conf.OverlayVolumes) != 0 { + for _, v := range conf.OverlayVolumes { + overlay = append(overlay, &specgen.OverlayVolume{ + Source: v.Source, + Destination: v.Dest, + Options: v.Options, + }) + } + } + specg.OverlayVolumes = overlay + _, mounts := c.SortUserVolumes(c.Spec()) + specg.Mounts = mounts + specg.HostDeviceList = conf.DeviceHostSrc + specg.Networks = conf.Networks + + mapSecurityConfig(conf, specg) + + if c.IsInfra() { // if we are creating this spec for a pod's infra ctr, map the compatible options + spec, err := json.Marshal(specg) + if err != nil { + return nil, nil, err + } + infraInherit := &libpod.InfraInherit{} + err = json.Unmarshal(spec, infraInherit) + return c, infraInherit, err + } + // else just return the container + return c, nil, nil +} + +// mapSecurityConfig takes a libpod.ContainerConfig and converts its security fields to a specgen.ContainerSecurityConfig +func mapSecurityConfig(c *libpod.ContainerConfig, s *specgen.SpecGenerator) { + s.Privileged = c.Privileged + s.SelinuxOpts = append(s.SelinuxOpts, c.LabelOpts...)
+ s.User = c.User + s.Groups = c.Groups + s.HostUsers = c.HostUsers +} diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/container_create.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go similarity index 65% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/container_create.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go index 92c0f22d9f2..19a2b702c2e 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/container_create.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go @@ -2,18 +2,17 @@ package generate import ( "context" - "fmt" - "os" + "encoding/json" "path/filepath" "strings" - cdi "github.com/container-orchestrated-devices/container-device-interface/pkg" + cdi "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" "github.com/containers/common/libimage" - "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/pkg/util" - "github.com/containers/storage/types" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" @@ -23,7 +22,7 @@ import ( // MakeContainer creates a container based on the SpecGenerator. // Returns the created, container and any warnings resulting from creating the // container, or an error. -func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator) (*spec.Spec, *specgen.SpecGenerator, []libpod.CtrCreateOption, error) { +func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator, clone bool, c *libpod.Container) (*spec.Spec, *specgen.SpecGenerator, []libpod.CtrCreateOption, error) { rtc, err := rt.GetConfigNoCopy() if err != nil { return nil, nil, nil, err @@ -31,43 +30,30 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener // If joining a pod, retrieve the pod for use, and its infra container var pod *libpod.Pod - var infraConfig *libpod.ContainerConfig + var infra *libpod.Container if s.Pod != "" { pod, err = rt.LookupPod(s.Pod) if err != nil { return nil, nil, nil, errors.Wrapf(err, "error retrieving pod %s", s.Pod) } if pod.HasInfraContainer() { - infra, err := pod.InfraContainer() + infra, err = pod.InfraContainer() if err != nil { return nil, nil, nil, err } - infraConfig = infra.Config() } } - if infraConfig != nil && (len(infraConfig.NamedVolumes) > 0 || len(infraConfig.UserVolumes) > 0 || len(infraConfig.ImageVolumes) > 0 || len(infraConfig.OverlayVolumes) > 0) { - s.VolumesFrom = append(s.VolumesFrom, infraConfig.ID) - } - - if infraConfig != nil && len(infraConfig.Spec.Linux.Devices) > 0 { - s.DevicesFrom = append(s.DevicesFrom, infraConfig.ID) - } - if infraConfig != nil && infraConfig.Spec.Linux.Resources != nil && infraConfig.Spec.Linux.Resources.BlockIO != nil && len(infraConfig.Spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice) > 0 { - tempDev := make(map[string]spec.LinuxThrottleDevice) - for _, val := range infraConfig.Spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice { - nodes, err := util.FindDeviceNodes() - if err != nil { - return nil, 
nil, nil, err - } - key := fmt.Sprintf("%d:%d", val.Major, val.Minor) - tempDev[nodes[key]] = spec.LinuxThrottleDevice{Rate: uint64(val.Rate)} - } - for i, dev := range s.ThrottleReadBpsDevice { - tempDev[i] = dev + options := []libpod.CtrCreateOption{} + compatibleOptions := &libpod.InfraInherit{} + var infraSpec *spec.Spec + if infra != nil { + options, infraSpec, compatibleOptions, err = Inherit(*infra, s, rt) + if err != nil { + return nil, nil, nil, err } - s.ThrottleReadBpsDevice = tempDev } + if err := FinishThrottleDevices(s); err != nil { return nil, nil, nil, err } @@ -99,6 +85,12 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener return nil, nil, nil, err } s.UserNS = defaultNS + + mappings, err := util.ParseIDMapping(namespaces.UsernsMode(s.UserNS.NSMode), nil, nil, "", "") + if err != nil { + return nil, nil, nil, err + } + s.IDMappings = mappings } if s.NetNS.IsDefault() { defaultNS, err := GetDefaultNamespaceMode("net", rtc, pod) @@ -115,8 +107,6 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener s.CgroupNS = defaultNS } - options := []libpod.CtrCreateOption{} - if s.ContainerCreateCommand != nil { options = append(options, libpod.WithCreateCommand(s.ContainerCreateCommand)) } @@ -152,29 +142,21 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener return nil, nil, nil, err } - command, err := makeCommand(ctx, s, imageData, rtc) - if err != nil { - return nil, nil, nil, err + if len(s.HostUsers) > 0 { + options = append(options, libpod.WithHostUsers(s.HostUsers)) } - opts, err := createContainerOptions(ctx, rt, s, pod, finalVolumes, finalOverlays, imageData, command) + command, err := makeCommand(s, imageData, rtc) if err != nil { return nil, nil, nil, err } - options = append(options, opts...) - var exitCommandArgs []string - - exitCommandArgs, err = CreateExitCommandArgs(rt.StorageConfig(), rtc, logrus.IsLevelEnabled(logrus.DebugLevel), s.Remove, false) + infraVol := (len(compatibleOptions.Mounts) > 0 || len(compatibleOptions.Volumes) > 0 || len(compatibleOptions.ImageVolumes) > 0 || len(compatibleOptions.OverlayVolumes) > 0) + opts, err := createContainerOptions(rt, s, pod, finalVolumes, finalOverlays, imageData, command, infraVol, *compatibleOptions) if err != nil { return nil, nil, nil, err } - - options = append(options, libpod.WithExitCommand(exitCommandArgs)) - - if len(s.Aliases) > 0 { - options = append(options, libpod.WithNetworkAliases(s.Aliases)) - } + options = append(options, opts...) if containerType := s.InitContainerType; len(containerType) > 0 { options = append(options, libpod.WithInitCtrType(containerType)) @@ -183,27 +165,54 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener logrus.Debugf("setting container name %s", s.Name) options = append(options, libpod.WithName(s.Name)) } - if len(s.DevicesFrom) > 0 { - for _, dev := range s.DevicesFrom { - ctr, err := rt.GetContainer(dev) + if len(s.Devices) > 0 { + opts = ExtractCDIDevices(s) + options = append(options, opts...) 
+ } + runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts, pod, command, compatibleOptions) + if clone { // the container fails to start if cloned due to missing Linux spec entries + if c == nil { + return nil, nil, nil, errors.New("the given container could not be retrieved") + } + conf := c.Config() + if conf != nil && conf.Spec != nil && conf.Spec.Linux != nil { + out, err := json.Marshal(conf.Spec.Linux) + if err != nil { + return nil, nil, nil, err + } + err = json.Unmarshal(out, runtimeSpec.Linux) + if err != nil { + return nil, nil, nil, err + } - devices := ctr.DeviceHostSrc() - s.Devices = append(s.Devices, devices...) } - } - if len(s.Devices) > 0 { - opts = extractCDIDevices(s) - options = append(options, opts...) - } - runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts, pod, command) - if err != nil { - return nil, nil, nil, err + if s.ResourceLimits != nil { + switch { + case s.ResourceLimits.CPU != nil: + runtimeSpec.Linux.Resources.CPU = s.ResourceLimits.CPU + case s.ResourceLimits.Memory != nil: + runtimeSpec.Linux.Resources.Memory = s.ResourceLimits.Memory + case s.ResourceLimits.BlockIO != nil: + runtimeSpec.Linux.Resources.BlockIO = s.ResourceLimits.BlockIO + case s.ResourceLimits.Devices != nil: + runtimeSpec.Linux.Resources.Devices = s.ResourceLimits.Devices + } + } } if len(s.HostDeviceList) > 0 { options = append(options, libpod.WithHostDevice(s.HostDeviceList)) } + if infraSpec != nil && infraSpec.Linux != nil { // if we are inheriting Linux info from a pod... + // Pass Security annotations + if len(infraSpec.Annotations[define.InspectAnnotationLabel]) > 0 && len(runtimeSpec.Annotations[define.InspectAnnotationLabel]) == 0 { + runtimeSpec.Annotations[define.InspectAnnotationLabel] = infraSpec.Annotations[define.InspectAnnotationLabel] + } + if len(infraSpec.Annotations[define.InspectAnnotationSeccomp]) > 0 && len(runtimeSpec.Annotations[define.InspectAnnotationSeccomp]) == 0 { + runtimeSpec.Annotations[define.InspectAnnotationSeccomp] = infraSpec.Annotations[define.InspectAnnotationSeccomp] + } + if len(infraSpec.Annotations[define.InspectAnnotationApparmor]) > 0 && len(runtimeSpec.Annotations[define.InspectAnnotationApparmor]) == 0 { + runtimeSpec.Annotations[define.InspectAnnotationApparmor] = infraSpec.Annotations[define.InspectAnnotationApparmor] + } + } return runtimeSpec, s, options, err } func ExecuteCreate(ctx context.Context, rt *libpod.Runtime, runtimeSpec *spec.Spec, s *specgen.SpecGenerator, infra bool, options ...libpod.CtrCreateOption) (*libpod.Container, error) { @@ -215,33 +224,36 @@ func ExecuteCreate(ctx context.Context, rt *libpod.Runtime, runtimeSpec *spec.Sp return ctr, rt.PrepareVolumeOnCreateContainer(ctx, ctr) } -func extractCDIDevices(s *specgen.SpecGenerator) []libpod.CtrCreateOption { +// ExtractCDIDevices processes the list of Devices in the spec and determines if any of these are CDI devices. +// The CDI devices are added to the list of CtrCreateOptions. +// Note that this may modify the device list associated with the spec, which should then only contain non-CDI devices.
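// As context for the function that follows: a device entry counts as CDI only
// when it is a fully qualified CDI name of the form vendor.com/class=name
// rather than a device path. A standalone sketch using the same pkg/cdi
// import as this file (outputs shown in comments):
package main

import (
	"fmt"

	cdi "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
)

func main() {
	fmt.Println(cdi.IsQualifiedName("nvidia.com/gpu=0")) // true: treated as a CDI device
	fmt.Println(cdi.IsQualifiedName("/dev/fuse"))        // false: treated as a standard device
}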
+func ExtractCDIDevices(s *specgen.SpecGenerator) []libpod.CtrCreateOption { devs := make([]spec.LinuxDevice, 0, len(s.Devices)) var cdiDevs []string var options []libpod.CtrCreateOption for _, device := range s.Devices { - isCDIDevice, err := cdi.HasDevice(device.Path) - if err != nil { - logrus.Debugf("CDI HasDevice Error: %v", err) - } - if err == nil && isCDIDevice { + if isCDIDevice(device.Path) { + logrus.Debugf("Identified CDI device %v", device.Path) cdiDevs = append(cdiDevs, device.Path) continue } - + logrus.Debugf("Non-CDI device %v; assuming standard device", device.Path) devs = append(devs, device) } - s.Devices = devs if len(cdiDevs) > 0 { options = append(options, libpod.WithCDI(cdiDevs)) } - return options } -func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGenerator, pod *libpod.Pod, volumes []*specgen.NamedVolume, overlays []*specgen.OverlayVolume, imageData *libimage.ImageData, command []string) ([]libpod.CtrCreateOption, error) { +// isCDIDevice checks whether the specified device is a CDI device. +func isCDIDevice(device string) bool { + return cdi.IsQualifiedName(device) +} + +func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *libpod.Pod, volumes []*specgen.NamedVolume, overlays []*specgen.OverlayVolume, imageData *libimage.ImageData, command []string, infraVolumes bool, compatibleOptions libpod.InfraInherit) ([]libpod.CtrCreateOption, error) { var options []libpod.CtrCreateOption var err error @@ -262,6 +274,9 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. if s.Volatile { options = append(options, libpod.WithVolatile()) } + if s.PasswdEntry != "" { + options = append(options, libpod.WithPasswdEntry(s.PasswdEntry)) + } useSystemd := false switch s.Systemd { @@ -280,7 +295,16 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. "/usr/sbin/init": true, "/usr/local/sbin/init": true, } - if useSystemdCommands[command[0]] || (filepath.Base(command[0]) == "systemd") { + // Grab last command in case this is launched from a shell + cmd := command + if len(command) > 2 { + // Podman build will add "/bin/sh" "-c" to + // Entrypoint. Remove and search for systemd + if command[0] == "/bin/sh" && command[1] == "-c" { + cmd = command[2:] + } + } + if useSystemdCommands[cmd[0]] || (filepath.Base(cmd[0]) == "systemd") { useSystemd = true } } @@ -322,7 +346,10 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. for _, imageVolume := range s.ImageVolumes { destinations = append(destinations, imageVolume.Destination) } - options = append(options, libpod.WithUserVolumes(destinations)) + + if len(destinations) > 0 || !infraVolumes { + options = append(options, libpod.WithUserVolumes(destinations)) + } if len(volumes) != 0 { var vols []*libpod.ContainerNamedVolume @@ -366,6 +393,9 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. if s.Entrypoint != nil { options = append(options, libpod.WithEntrypoint(s.Entrypoint)) } + if len(s.ContainerStorageConfig.StorageOpts) > 0 { + options = append(options, libpod.WithStorageOpts(s.StorageOpts)) + } // If the user did not specify a workdir on the CLI, let's extract it // from the image. if s.WorkDir == "" && imageData != nil { @@ -375,6 +405,9 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. 
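// The systemd entrypoint detection above unwraps shell-form commands before
// inspecting them. A condensed, standalone sketch of that check (the map here
// is abridged; the full list lives in createContainerOptions above):
package main

import (
	"fmt"
	"path/filepath"
)

func looksLikeSystemd(command []string) bool {
	useSystemdCommands := map[string]bool{
		"/sbin/init":     true,
		"/usr/sbin/init": true,
	}
	cmd := command
	// Shell form such as ["/bin/sh", "-c", "/usr/sbin/init"] hides the real command.
	if len(command) > 2 && command[0] == "/bin/sh" && command[1] == "-c" {
		cmd = command[2:]
	}
	return useSystemdCommands[cmd[0]] || filepath.Base(cmd[0]) == "systemd"
}

func main() {
	fmt.Println(looksLikeSystemd([]string{"/bin/sh", "-c", "/usr/sbin/init"})) // true
	fmt.Println(looksLikeSystemd([]string{"nginx", "-g", "daemon off;"}))      // false
}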
if s.WorkDir == "" { s.WorkDir = "/" } + if s.CreateWorkingDir { + options = append(options, libpod.WithCreateWorkingDir()) + } if s.StopSignal != nil { options = append(options, libpod.WithStopSignal(*s.StopSignal)) } @@ -403,26 +436,24 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. // Security options if len(s.SelinuxOpts) > 0 { options = append(options, libpod.WithSecLabels(s.SelinuxOpts)) - } else { - if pod != nil { - // duplicate the security options from the pod - processLabel, err := pod.ProcessLabel() + } else if pod != nil && len(compatibleOptions.SelinuxOpts) == 0 { + // duplicate the security options from the pod + processLabel, err := pod.ProcessLabel() + if err != nil { + return nil, err + } + if processLabel != "" { + selinuxOpts, err := label.DupSecOpt(processLabel) if err != nil { return nil, err } - if processLabel != "" { - selinuxOpts, err := label.DupSecOpt(processLabel) - if err != nil { - return nil, err - } - options = append(options, libpod.WithSecLabels(selinuxOpts)) - } + options = append(options, libpod.WithSecLabels(selinuxOpts)) } } options = append(options, libpod.WithPrivileged(s.Privileged)) // Get namespace related options - namespaceOpts, err := namespaceOptions(ctx, s, rt, pod, imageData) + namespaceOpts, err := namespaceOptions(s, rt, pod, imageData) if err != nil { return nil, err } @@ -468,6 +499,7 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. UID: s.UID, GID: s.GID, Mode: s.Mode, + Target: s.Target, }) } options = append(options, libpod.WithSecrets(secrs)) @@ -491,56 +523,33 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen. if s.PidFile != "" { options = append(options, libpod.WithPidFile(s.PidFile)) } - return options, nil -} - -func CreateExitCommandArgs(storageConfig types.StoreOptions, config *config.Config, syslog, rm, exec bool) ([]string, error) { - // We need a cleanup process for containers in the current model. - // But we can't assume that the caller is Podman - it could be another - // user of the API. - // As such, provide a way to specify a path to Podman, so we can - // still invoke a cleanup process. - podmanPath, err := os.Executable() - if err != nil { - return nil, err + if len(s.ChrootDirs) != 0 { + options = append(options, libpod.WithChrootDirs(s.ChrootDirs)) } - command := []string{podmanPath, - "--root", storageConfig.GraphRoot, - "--runroot", storageConfig.RunRoot, - "--log-level", logrus.GetLevel().String(), - "--cgroup-manager", config.Engine.CgroupManager, - "--tmpdir", config.Engine.TmpDir, - "--cni-config-dir", config.Network.NetworkConfigDir, - } - if config.Engine.OCIRuntime != "" { - command = append(command, []string{"--runtime", config.Engine.OCIRuntime}...) - } - if storageConfig.GraphDriverName != "" { - command = append(command, []string{"--storage-driver", storageConfig.GraphDriverName}...) - } - for _, opt := range storageConfig.GraphDriverOptions { - command = append(command, []string{"--storage-opt", opt}...) - } - if config.Engine.EventsLogger != "" { - command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...) - } + options = append(options, libpod.WithSelectedPasswordManagement(s.Passwd)) - if syslog { - command = append(command, "--syslog") - } - command = append(command, []string{"container", "cleanup"}...) 
+ return options, nil +} - if rm { - command = append(command, "--rm") +func Inherit(infra libpod.Container, s *specgen.SpecGenerator, rt *libpod.Runtime) (opts []libpod.CtrCreateOption, infraS *spec.Spec, compat *libpod.InfraInherit, err error) { + inheritSpec := &specgen.SpecGenerator{} + _, compatibleOptions, err := ConfigToSpec(rt, inheritSpec, infra.ID()) + if err != nil { + return nil, nil, nil, err } + options := []libpod.CtrCreateOption{} + infraConf := infra.Config() + infraSpec := infraConf.Spec - // This has to be absolutely last, to ensure that the exec session ID - // will be added after it by Libpod. - if exec { - command = append(command, "--exec") + compatByte, err := json.Marshal(compatibleOptions) + if err != nil { + return nil, nil, nil, err } - - return command, nil + err = json.Unmarshal(compatByte, s) + if err != nil { + return nil, nil, nil, err + } + return options, infraSpec, compatibleOptions, nil } diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go similarity index 81% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/namespaces.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go index 5349e224f3e..37d561ec281 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/namespaces.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go @@ -1,18 +1,18 @@ package generate import ( - "context" "fmt" "os" "strings" "github.com/containers/common/libimage" + "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" @@ -58,7 +58,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod) case "pid": return specgen.ParseNamespace(cfg.Containers.PidNS) case "ipc": - return specgen.ParseNamespace(cfg.Containers.IPCNS) + return specgen.ParseIPCNamespace(cfg.Containers.IPCNS) case "uts": return specgen.ParseNamespace(cfg.Containers.UTSNS) case "user": @@ -66,7 +66,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod) case "cgroup": return specgen.ParseCgroupNamespace(cfg.Containers.CgroupNS) case "net": - ns, _, err := specgen.ParseNetworkNamespace(cfg.Containers.NetNS, cfg.Containers.RootlessNetworking == "cni") + ns, _, _, err := specgen.ParseNetworkFlag(nil) return ns, err } @@ -79,7 +79,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod) // joining a pod. // TODO: Consider grouping options that are not directly attached to a namespace // elsewhere. 
-func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.Pod, imageData *libimage.ImageData) ([]libpod.CtrCreateOption, error) { +func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.Pod, imageData *libimage.ImageData) ([]libpod.CtrCreateOption, error) { toReturn := []libpod.CtrCreateOption{} // If pod is not nil, get infra container. @@ -133,8 +133,17 @@ func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod. if err != nil { return nil, errors.Wrapf(err, "error looking up container to share ipc namespace with") } + if ipcCtr.ConfigNoCopy().NoShmShare { + return nil, errors.Errorf("joining IPC of container %s is not allowed: non-shareable IPC (hint: use IpcMode:shareable for the donor container)", ipcCtr.ID()) + } toReturn = append(toReturn, libpod.WithIPCNSFrom(ipcCtr)) - toReturn = append(toReturn, libpod.WithShmDir(ipcCtr.ShmDir())) + if !ipcCtr.ConfigNoCopy().NoShm { + toReturn = append(toReturn, libpod.WithShmDir(ipcCtr.ShmDir())) + } + case specgen.None: + toReturn = append(toReturn, libpod.WithNoShm(true)) + case specgen.Private: + toReturn = append(toReturn, libpod.WithNoShmShare(true)) } // UTS @@ -155,21 +164,19 @@ func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod. // User switch s.UserNS.NSMode { case specgen.KeepID: - if rootless.IsRootless() { - toReturn = append(toReturn, libpod.WithAddCurrentUserPasswdEntry()) - - // If user is not overridden, set user in the container - // to user running Podman. - if s.User == "" { - _, uid, gid, err := util.GetKeepIDMapping() - if err != nil { - return nil, err - } - toReturn = append(toReturn, libpod.WithUser(fmt.Sprintf("%d:%d", uid, gid))) + if !rootless.IsRootless() { + return nil, errors.New("keep-id is only supported in rootless mode") + } + toReturn = append(toReturn, libpod.WithAddCurrentUserPasswdEntry()) + + // If user is not overridden, set user in the container + // to user running Podman. + if s.User == "" { + _, uid, gid, err := util.GetKeepIDMapping() + if err != nil { + return nil, err } - } else { - // keep-id as root doesn't need a user namespace - s.UserNS.NSMode = specgen.Host + toReturn = append(toReturn, libpod.WithUser(fmt.Sprintf("%d:%d", uid, gid))) } case specgen.FromPod: if pod == nil || infraCtr == nil { @@ -192,8 +199,12 @@ func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod. // This wipes the UserNS settings that get set from the infra container // when we are inheriting from the pod. So only apply this if the container // is not being created in a pod. - if s.IDMappings != nil && pod == nil { - toReturn = append(toReturn, libpod.WithIDMappings(*s.IDMappings)) + if s.IDMappings != nil { + if pod == nil { + toReturn = append(toReturn, libpod.WithIDMappings(*s.IDMappings)) + } else if pod.HasInfraContainer() && (len(s.IDMappings.UIDMap) > 0 || len(s.IDMappings.GIDMap) > 0) { + return nil, errors.Wrapf(define.ErrInvalidArg, "cannot specify a new uid/gid map when entering a pod with an infra container") + } } if s.User != "" { toReturn = append(toReturn, libpod.WithUser(s.User)) } @@ -242,7 +253,7 @@ func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.
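// Under keep-id above, the container user defaults to the uid:gid of the
// rootless user invoking Podman; util.GetKeepIDMapping also computes the
// user-namespace mappings. A rough standalone approximation of just the
// uid:gid value passed to libpod.WithUser:
package main

import (
	"fmt"
	"os"
)

func main() {
	// For a rootless user with uid/gid 1000 this prints "1000:1000", the same
	// form as the fmt.Sprintf("%d:%d", uid, gid) call above.
	fmt.Printf("%d:%d\n", os.Getuid(), os.Getgid())
}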
} toReturn = append(toReturn, libpod.WithNetNSFrom(netCtr)) case specgen.Slirp: - portMappings, expose, err := createPortMappings(ctx, s, imageData) + portMappings, expose, err := createPortMappings(s, imageData) if err != nil { return nil, err } @@ -250,15 +261,42 @@ func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod. if s.NetNS.Value != "" { val = fmt.Sprintf("slirp4netns:%s", s.NetNS.Value) } - toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, s.CNINetworks)) + toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, val, nil)) case specgen.Private: fallthrough case specgen.Bridge: - portMappings, expose, err := createPortMappings(ctx, s, imageData) + portMappings, expose, err := createPortMappings(s, imageData) if err != nil { return nil, err } - toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, "bridge", s.CNINetworks)) + + rtConfig, err := rt.GetConfigNoCopy() + if err != nil { + return nil, err + } + // if no network was specified, add the default + if len(s.Networks) == 0 { + // backwards compatibility: still allow the old cni networks list and convert it to the new format + if len(s.CNINetworks) > 0 { + logrus.Warn(`specgen "cni_networks" option is deprecated use the "networks" map instead`) + networks := make(map[string]types.PerNetworkOptions, len(s.CNINetworks)) + for _, net := range s.CNINetworks { + networks[net] = types.PerNetworkOptions{} + } + s.Networks = networks + } else { + // no networks given but bridge is set, so use the default network + s.Networks = map[string]types.PerNetworkOptions{ + rtConfig.Network.DefaultNetwork: {}, + } + } + } + // rename the "default" network to the correct default name + if opts, ok := s.Networks["default"]; ok { + s.Networks[rtConfig.Network.DefaultNetwork] = opts + delete(s.Networks, "default") + } + toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, "bridge", s.Networks)) } if s.UseImageHosts { @@ -281,12 +319,6 @@ func namespaceOptions(ctx context.Context, s *specgen.SpecGenerator, rt *libpod. if len(s.DNSOptions) > 0 { toReturn = append(toReturn, libpod.WithDNSOption(s.DNSOptions)) } - if s.StaticIP != nil { - toReturn = append(toReturn, libpod.WithStaticIP(*s.StaticIP)) - } - if s.StaticMAC != nil { - toReturn = append(toReturn, libpod.WithStaticMAC(*s.StaticMAC)) - } if s.NetworkOptions != nil { toReturn = append(toReturn, libpod.WithNetworkOptions(s.NetworkOptions)) } @@ -448,13 +480,13 @@ func GetNamespaceOptions(ns []string, netnsIsHost bool) ([]libpod.PodCreateOptio var options []libpod.PodCreateOption var erroredOptions []libpod.PodCreateOption if ns == nil { - //set the default namespaces + // set the default namespaces ns = strings.Split(specgen.DefaultKernelNamespaces, ",") } for _, toShare := range ns { switch toShare { case "cgroup": - options = append(options, libpod.WithPodCgroups()) + options = append(options, libpod.WithPodCgroup()) case "net": // share the netns setting with other containers in the pod only when it is not set to host if !netnsIsHost { @@ -474,7 +506,7 @@ func GetNamespaceOptions(ns []string, netnsIsHost bool) ([]libpod.PodCreateOptio case "none": return erroredOptions, nil default: - return erroredOptions, errors.Errorf("Invalid kernel namespace to share: %s. Options are: net, pid, ipc, uts or none", toShare) + return erroredOptions, errors.Errorf("Invalid kernel namespace to share: %s.
Options are: cgroup, ipc, net, pid, uts or none", toShare) } } return options, nil diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/oci.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go similarity index 77% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/oci.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go index beccd9fc2a1..b77c00f5063 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/oci.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go @@ -2,16 +2,17 @@ package generate import ( "context" + "encoding/json" "path" "strings" "github.com/containers/common/libimage" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/pkg/errors" @@ -31,7 +32,7 @@ func setProcOpts(s *specgen.SpecGenerator, g *generate.Generator) { } } -func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) error { +func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) { var ( isRootless = rootless.IsRootless() nofileSet = false @@ -40,7 +41,7 @@ func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) error { if s.Rlimits == nil { g.Config.Process.Rlimits = nil - return nil + return } for _, u := range s.Rlimits { @@ -90,12 +91,10 @@ func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) error { } g.AddProcessRlimits("RLIMIT_NPROC", max, current) } - - return nil } // Produce the final command for the container. -func makeCommand(ctx context.Context, s *specgen.SpecGenerator, imageData *libimage.ImageData, rtc *config.Config) ([]string, error) { +func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData, rtc *config.Config) ([]string, error) { finalCommand := []string{} entrypoint := s.Entrypoint @@ -151,7 +150,7 @@ func canMountSys(isRootless, isNewUserns bool, s *specgen.SpecGenerator) bool { return true } -func getCGroupPermissons(unmask []string) string { +func getCgroupPermissons(unmask []string) string { ro := "ro" rw := "rw" cgroup := "/sys/fs/cgroup" @@ -174,8 +173,8 @@ func getCGroupPermissons(unmask []string) string { } // SpecGenToOCI returns the base configuration for the container. 
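// The resource-limit handling inside SpecGenToOCI below overlays container
// limits on top of inherited infra limits by round-tripping through JSON:
// fields present in the later Unmarshal win, absent fields are left alone.
// A minimal standalone sketch of that overlay pattern (the limits struct here
// is illustrative, not the OCI type):
package main

import (
	"encoding/json"
	"fmt"
)

type limits struct {
	CPUShares *uint64 `json:"cpuShares,omitempty"`
	MemoryMax *int64  `json:"memoryMax,omitempty"`
}

func main() {
	shares, mem := uint64(512), int64(1<<30)
	infra := limits{CPUShares: &shares} // inherited from the infra container
	ctr := limits{MemoryMax: &mem}      // set on the container itself

	merged := limits{}
	base, _ := json.Marshal(infra)
	override, _ := json.Marshal(ctr)
	_ = json.Unmarshal(base, &merged)     // apply infra limits first
	_ = json.Unmarshal(override, &merged) // container limits override/extend
	fmt.Println(*merged.CPUShares, *merged.MemoryMax) // 512 1073741824
}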
-func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, newImage *libimage.Image, mounts []spec.Mount, pod *libpod.Pod, finalCmd []string) (*spec.Spec, error) { - cgroupPerm := getCGroupPermissons(s.Unmask) +func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, newImage *libimage.Image, mounts []spec.Mount, pod *libpod.Pod, finalCmd []string, compatibleOptions *libpod.InfraInherit) (*spec.Spec, error) { + cgroupPerm := getCgroupPermissons(s.Unmask) g, err := generate.New("linux") if err != nil { @@ -298,11 +297,46 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt for key, val := range s.Annotations { g.AddAnnotation(key, val) } - g.AddProcessEnv("container", "podman") - g.Config.Linux.Resources = s.ResourceLimits + switch { + case compatibleOptions.InfraResources == nil && s.ResourceLimits != nil: + out, err := json.Marshal(s.ResourceLimits) + if err != nil { + return nil, err + } + err = json.Unmarshal(out, g.Config.Linux.Resources) + if err != nil { + return nil, err + } + case s.ResourceLimits != nil: // if we have predefined resource limits we need to make sure we keep the infra and container limits + originalResources, err := json.Marshal(s.ResourceLimits) + if err != nil { + return nil, err + } + infraResources, err := json.Marshal(compatibleOptions.InfraResources) + if err != nil { + return nil, err + } + err = json.Unmarshal(infraResources, s.ResourceLimits) // put infra's resource limits in the container + if err != nil { + return nil, err + } + err = json.Unmarshal(originalResources, s.ResourceLimits) // make sure we did not override anything + if err != nil { + return nil, err + } + g.Config.Linux.Resources = s.ResourceLimits + default: + g.Config.Linux.Resources = compatibleOptions.InfraResources + } // Devices + // set the default rule at the beginning of device configuration + if !inUserNS && !s.Privileged { + g.AddLinuxResourcesDevice(false, "", nil, nil, "rwm") + } + + var userDevices []spec.LinuxDevice if s.Privileged { // If privileged, we need to add all the host devices to the // spec. 
We do not add the user provided ones because we are @@ -317,28 +351,43 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt return nil, err } } + if len(compatibleOptions.HostDeviceList) > 0 && len(s.Devices) == 0 { + userDevices = compatibleOptions.HostDeviceList + } else { + userDevices = s.Devices + } // add default devices specified by caller - for _, device := range s.Devices { + for _, device := range userDevices { if err = DevicesFromPath(&g, device.Path); err != nil { return nil, err } } } - s.HostDeviceList = s.Devices + s.HostDeviceList = userDevices - for _, dev := range s.DeviceCGroupRule { - g.AddLinuxResourcesDevice(true, dev.Type, dev.Major, dev.Minor, dev.Access) + // set the devices cgroup when not running in a user namespace + if !inUserNS && !s.Privileged { + for _, dev := range s.DeviceCgroupRule { + g.AddLinuxResourcesDevice(true, dev.Type, dev.Major, dev.Minor, dev.Access) + } + } + + for k, v := range s.WeightDevice { + statT := unix.Stat_t{} + if err := unix.Stat(k, &statT); err != nil { + return nil, errors.Wrapf(err, "failed to inspect '%s' in --blkio-weight-device", k) + } + g.AddLinuxResourcesBlockIOWeightDevice((int64(unix.Major(uint64(statT.Rdev)))), (int64(unix.Minor(uint64(statT.Rdev)))), *v.Weight) } BlockAccessToKernelFilesystems(s.Privileged, s.PidNS.IsHost(), s.Mask, s.Unmask, &g) + g.ClearProcessEnv() for name, val := range s.Env { g.AddProcessEnv(name, val) } - if err := addRlimits(s, &g); err != nil { - return nil, err - } + addRlimits(s, &g) // NAMESPACES if err := specConfigureNamespaces(s, &g, rt, pod); err != nil { diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/pod_create.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go similarity index 60% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/pod_create.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go index a4027eae749..a3408b4023a 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/pod_create.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go @@ -2,22 +2,115 @@ package generate import ( "context" + "fmt" + "io/ioutil" "net" + "os" + buildahDefine "github.com/containers/buildah/define" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/domain/entities" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/specgen" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v4/pkg/specgen" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) { + version, err := define.GetVersion() + if err != nil { + return "", err + } + imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built) + + // First check if the image has already been built. + if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil { + return imageName, nil + } + + // Also look into the path as some distributions install catatonit in + // /usr/bin. 
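// For illustration, the Containerfile generated just below renders to
// something like the following, assuming catatonit is found at
// /usr/libexec/podman/catatonit (the actual path varies by distribution):
//
//	FROM scratch
//	COPY /usr/libexec/podman/catatonit /catatonit
//	ENTRYPOINT ["/catatonit", "-P"]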
+ catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true) + if err != nil { + return "", fmt.Errorf("finding pause binary: %w", err) + } + + buildContent := fmt.Sprintf(`FROM scratch +COPY %s /catatonit +ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath) + + tmpF, err := ioutil.TempFile("", "pause.containerfile") + if err != nil { + return "", err + } + if _, err := tmpF.WriteString(buildContent); err != nil { + return "", err + } + if err := tmpF.Close(); err != nil { + return "", err + } + defer os.Remove(tmpF.Name()) + + buildOptions := buildahDefine.BuildOptions{ + CommonBuildOpts: &buildahDefine.CommonBuildOptions{}, + Output: imageName, + Quiet: true, + IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529) + IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout + } + if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil { + return "", err + } + + return imageName, nil +} + +func pullOrBuildInfraImage(p *entities.PodSpec, rt *libpod.Runtime) error { + if p.PodSpecGen.NoInfra { + return nil + } + + rtConfig, err := rt.GetConfigNoCopy() + if err != nil { + return err + } + + // NOTE: we need to pull down the infra image if it was explicitly set by + // the user (or containers.conf) to the non-default one. + imageName := p.PodSpecGen.InfraImage + if imageName == "" { + imageName = rtConfig.Engine.InfraImage + } + + if imageName != "" { + _, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil) + if err != nil { + return err + } + } else { + name, err := buildPauseImage(rt, rtConfig) + if err != nil { + return fmt.Errorf("building local pause image: %w", err) + } + imageName = name + } + + p.PodSpecGen.InfraImage = imageName + p.PodSpecGen.InfraContainerSpec.RawImageName = imageName + + return nil +} + func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) { if err := p.PodSpecGen.Validate(); err != nil { return nil, err } + + if err := pullOrBuildInfraImage(p, rt); err != nil { + return nil, err + } + if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil { var err error p.PodSpecGen.InfraContainerSpec, err = MapSpec(&p.PodSpecGen) @@ -26,7 +119,7 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) { } } - options, err := createPodOptions(&p.PodSpecGen, rt, p.PodSpecGen.InfraContainerSpec) + options, err := createPodOptions(&p.PodSpecGen) if err != nil { return nil, err } @@ -35,7 +128,6 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) { return nil, err } if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil { - p.PodSpecGen.InfraContainerSpec.ContainerCreateCommand = []string{} // we do NOT want os.Args as the command, will display the pod create cmd if p.PodSpecGen.InfraContainerSpec.Name == "" { p.PodSpecGen.InfraContainerSpec.Name = pod.ID()[:12] + "-infra" } @@ -44,7 +136,7 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) { return nil, err } p.PodSpecGen.InfraContainerSpec.User = "" // infraSpec user will get incorrectly assigned via the container creation process, overwrite here - rtSpec, spec, opts, err := MakeContainer(context.Background(), rt, p.PodSpecGen.InfraContainerSpec) + rtSpec, spec, opts, err := MakeContainer(context.Background(), rt, p.PodSpecGen.InfraContainerSpec, false, nil) if err != nil { return nil, err } @@ -69,12 +161,15 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) { return
pod, nil } -func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime, infraSpec *specgen.SpecGenerator) ([]libpod.PodCreateOption, error) { +func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, error) { var ( options []libpod.PodCreateOption ) - if !p.NoInfra { //&& infraSpec != nil { + if !p.NoInfra { options = append(options, libpod.WithInfraContainer()) + if p.ShareParent == nil || (p.ShareParent != nil && *p.ShareParent) { + options = append(options, libpod.WithPodParent()) + } nsOptions, err := GetNamespaceOptions(p.SharedNamespaces, p.InfraContainerSpec.NetNS.IsHost()) if err != nil { return nil, err @@ -109,11 +204,11 @@ func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime, infraSpec // replacing necessary values with those specified in pod creation func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { if len(p.PortMappings) > 0 { - ports, _, _, err := ParsePortMapping(p.PortMappings) + ports, err := ParsePortMapping(p.PortMappings, nil) if err != nil { return nil, err } - p.InfraContainerSpec.PortMappings = libpod.WithInfraContainerPorts(ports, p.InfraContainerSpec) + p.InfraContainerSpec.PortMappings = ports } switch p.NetNS.NSMode { case specgen.Default, "": @@ -121,24 +216,13 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { logrus.Debugf("No networking because the infra container is missing") break } - if rootless.IsRootless() { - logrus.Debugf("Pod will use slirp4netns") - if p.InfraContainerSpec.NetNS.NSMode != "host" { - p.InfraContainerSpec.NetworkOptions = p.NetworkOptions - p.InfraContainerSpec.NetNS.NSMode = specgen.NamespaceMode("slirp4netns") - } - } else { - logrus.Debugf("Pod using bridge network mode") - } case specgen.Bridge: p.InfraContainerSpec.NetNS.NSMode = specgen.Bridge logrus.Debugf("Pod using bridge network mode") case specgen.Host: logrus.Debugf("Pod will use host networking") if len(p.InfraContainerSpec.PortMappings) > 0 || - p.InfraContainerSpec.StaticIP != nil || - p.InfraContainerSpec.StaticMAC != nil || - len(p.InfraContainerSpec.CNINetworks) > 0 || + len(p.InfraContainerSpec.Networks) > 0 || p.InfraContainerSpec.NetNS.NSMode == specgen.NoNetwork { return nil, errors.Wrapf(define.ErrInvalidArg, "cannot set host network if network-related configuration is specified") } @@ -152,9 +236,7 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { case specgen.NoNetwork: logrus.Debugf("Pod will not use networking") if len(p.InfraContainerSpec.PortMappings) > 0 || - p.InfraContainerSpec.StaticIP != nil || - p.InfraContainerSpec.StaticMAC != nil || - len(p.InfraContainerSpec.CNINetworks) > 0 || + len(p.InfraContainerSpec.Networks) > 0 || p.InfraContainerSpec.NetNS.NSMode == "host" { return nil, errors.Wrapf(define.ErrInvalidArg, "cannot disable pod network if network-related configuration is specified") } @@ -163,7 +245,6 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { return nil, errors.Errorf("pods presently do not support network mode %s", p.NetNS.NSMode) } - libpod.WithPodCgroups() if len(p.InfraCommand) > 0 { p.InfraContainerSpec.Entrypoint = p.InfraCommand } @@ -183,15 +264,13 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { if len(p.DNSSearch) > 0 { p.InfraContainerSpec.DNSSearch = p.DNSSearch } - if p.StaticIP != nil { - p.InfraContainerSpec.StaticIP = p.StaticIP - } - if p.StaticMAC != nil { - p.InfraContainerSpec.StaticMAC = p.StaticMAC - } if p.NoManageResolvConf 
{ p.InfraContainerSpec.UseImageResolvConf = true } + if len(p.Networks) > 0 { + p.InfraContainerSpec.Networks = p.Networks + } + // deprecated CNI networks for API users if len(p.CNINetworks) > 0 { p.InfraContainerSpec.CNINetworks = p.CNINetworks } @@ -203,8 +282,6 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { p.InfraContainerSpec.ConmonPidFile = p.InfraConmonPidFile } - if p.InfraImage != config.DefaultInfraImage { - p.InfraContainerSpec.Image = p.InfraImage - } + p.InfraContainerSpec.Image = p.InfraImage return p.InfraContainerSpec, nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go new file mode 100644 index 00000000000..bec548d3bac --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go @@ -0,0 +1,421 @@ +package generate + +import ( + "fmt" + "net" + "sort" + "strings" + + "github.com/containers/common/libimage" + "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v4/utils" + + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/specgenutil" + "github.com/containers/podman/v4/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + protoTCP = "tcp" + protoUDP = "udp" + protoSCTP = "sctp" +) + +// joinTwoPortsToRangePortIfPossible expects two ports; the previous port must have a hostPort lower than or equal to the current port's hostPort. +func joinTwoPortsToRangePortIfPossible(ports *[]types.PortMapping, allHostPorts, allContainerPorts, currentHostPorts *[65536]bool, + previousPort *types.PortMapping, port types.PortMapping) (*types.PortMapping, error) { + // no previous port, just return the current one + if previousPort == nil { + return &port, nil + } + if previousPort.HostPort+previousPort.Range >= port.HostPort { + // check if the port range matches the host and container ports + portDiff := port.HostPort - previousPort.HostPort + if portDiff == port.ContainerPort-previousPort.ContainerPort { + // calculate the new range: use the old range and add the difference between the ports + newRange := port.Range + portDiff + // if the newRange is greater than the old range, use it; + // this is important, otherwise we could lower the range + if newRange > previousPort.Range { + previousPort.Range = newRange + } + return previousPort, nil + } + // if both host port ranges overlap and the container port range did not match, + // we have to error because we cannot assign the same host port to more than one container port + if previousPort.HostPort+previousPort.Range-1 > port.HostPort { + return nil, errors.Errorf("conflicting port mappings for host port %d (protocol %s)", port.HostPort, port.Protocol) + } + } + // we could not join the ports, so we append the old one to the list + // and return the current port as previous port + addPortToUsedPorts(ports, allHostPorts, allContainerPorts, currentHostPorts, previousPort) + return &port, nil +} + +// joinTwoContainerPortsToRangePortIfPossible expects two ports, both with no host port set; +// the previous port must have a containerPort lower than or equal to the current port's containerPort.
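// Worked example for the joining above: the mappings 8080->80 and 8081->81
// (Range 1 each) satisfy previousPort.HostPort+Range >= port.HostPort with
// matching host/container deltas, so they collapse into one range mapping.
// Standalone sketch using the same types.PortMapping type this file imports:
package main

import (
	"fmt"

	"github.com/containers/common/libnetwork/types"
)

func main() {
	joined := types.PortMapping{
		HostPort:      8080,
		ContainerPort: 80,
		Range:         2, // covers 8080->80 and 8081->81
		Protocol:      "tcp",
	}
	fmt.Printf("%d-%d -> %d-%d\n",
		joined.HostPort, joined.HostPort+joined.Range-1,
		joined.ContainerPort, joined.ContainerPort+joined.Range-1) // 8080-8081 -> 80-81
}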
+func joinTwoContainerPortsToRangePortIfPossible(ports *[]types.PortMapping, allHostPorts, allContainerPorts, currentHostPorts *[65536]bool, + previousPort *types.PortMapping, port types.PortMapping) (*types.PortMapping, error) { + // no previous port, just return the current one + if previousPort == nil { + return &port, nil + } + if previousPort.ContainerPort+previousPort.Range > port.ContainerPort { + // calculate the new range: use the old range and add the difference between the ports + newRange := port.ContainerPort - previousPort.ContainerPort + port.Range + // if the newRange is greater than the old range, use it; + // this is important, otherwise we could lower the range + if newRange > previousPort.Range { + previousPort.Range = newRange + } + return previousPort, nil + } + // we could not join the ports, so we append the old one to the list + // and return the current port as previous port + newPort, err := getRandomHostPort(currentHostPorts, *previousPort) + if err != nil { + return nil, err + } + addPortToUsedPorts(ports, allHostPorts, allContainerPorts, currentHostPorts, &newPort) + return &port, nil +} + +func addPortToUsedPorts(ports *[]types.PortMapping, allHostPorts, allContainerPorts, currentHostPorts *[65536]bool, port *types.PortMapping) { + for i := uint16(0); i < port.Range; i++ { + h := port.HostPort + i + allHostPorts[h] = true + currentHostPorts[h] = true + c := port.ContainerPort + i + allContainerPorts[c] = true + } + *ports = append(*ports, *port) +} + +// getRandomHostPort gets a random host port mapping for the given port. +// The caller has to supply an array with the already used ports. +func getRandomHostPort(hostPorts *[65536]bool, port types.PortMapping) (types.PortMapping, error) { +outer: + for i := 0; i < 15; i++ { + ranPort, err := utils.GetRandomPort() + if err != nil { + return port, err + } + + // if the port range exceeds the max port, we cannot use it + if ranPort+int(port.Range) > 65535 { + continue + } + + // check if there is a port in the range which is used + for j := 0; j < int(port.Range); j++ { + // port already used + if hostPorts[ranPort+j] { + continue outer + } + } + + port.HostPort = uint16(ranPort) + return port, nil + } + + // add range to error message if needed + rangePort := "" + if port.Range > 1 { + rangePort = fmt.Sprintf("with range %d ", port.Range) + } + + return port, errors.Errorf("failed to find an open port to expose container port %d %son the host", port.ContainerPort, rangePort) +} + +// ParsePortMapping parses port maps into a consolidated set of port mappings, +// resolving protocols, ranges, and randomly assigned host ports. +func ParsePortMapping(portMappings []types.PortMapping, exposePorts map[uint16][]string) ([]types.PortMapping, error) { + if len(portMappings) == 0 && len(exposePorts) == 0 { + return nil, nil + } + + // tempMapping stores the ports without ip and protocol + type tempMapping struct { + hostPort uint16 + containerPort uint16 + rangePort uint16 + } + + // portMap is a temporary structure to sort all ports + // the map is hostIp -> protocol -> array of mappings + portMap := make(map[string]map[string][]tempMapping) + + // allUsedContainerPorts stores all used ports for each protocol + // the key is the protocol and the array is 65536 elements long for each port.
+ allUsedContainerPortsMap := make(map[string][65536]bool) + allUsedHostPortsMap := make(map[string][65536]bool) + + // First, we need to validate the ports passed in the specgen + for _, port := range portMappings { + // First, check proto + protocols, err := checkProtocol(port.Protocol, true) + if err != nil { + return nil, err + } + if port.HostIP != "" { + if ip := net.ParseIP(port.HostIP); ip == nil { + return nil, errors.Errorf("invalid IP address %q in port mapping", port.HostIP) + } + } + + // Validate port numbers and range. + portRange := port.Range + if portRange == 0 { + portRange = 1 + } + containerPort := port.ContainerPort + if containerPort == 0 { + return nil, errors.Errorf("container port number must be non-0") + } + hostPort := port.HostPort + if uint32(portRange-1)+uint32(containerPort) > 65535 { + return nil, errors.Errorf("container port range exceeds maximum allowable port number") + } + if uint32(portRange-1)+uint32(hostPort) > 65535 { + return nil, errors.Errorf("host port range exceeds maximum allowable port number") + } + + hostProtoMap, ok := portMap[port.HostIP] + if !ok { + hostProtoMap = make(map[string][]tempMapping) + for _, proto := range []string{protoTCP, protoUDP, protoSCTP} { + hostProtoMap[proto] = make([]tempMapping, 0) + } + portMap[port.HostIP] = hostProtoMap + } + + p := tempMapping{ + hostPort: port.HostPort, + containerPort: port.ContainerPort, + rangePort: portRange, + } + + for _, proto := range protocols { + hostProtoMap[proto] = append(hostProtoMap[proto], p) + } + } + + // we no longer need the original port mappings + // set it to 0 length so we can reuse it to populate + // the slice again while keeping the underlying capacity + portMappings = portMappings[:0] + + for hostIP, protoMap := range portMap { + for protocol, ports := range protoMap { + ports := ports + if len(ports) == 0 { + continue + } + // 1.
sort the ports by host port + // use a small hack to make sure ports with host port 0 are sorted last + sort.Slice(ports, func(i, j int) bool { + if ports[i].hostPort == ports[j].hostPort { + return ports[i].containerPort < ports[j].containerPort + } + if ports[i].hostPort == 0 { + return false + } + if ports[j].hostPort == 0 { + return true + } + return ports[i].hostPort < ports[j].hostPort + }) + + allUsedContainerPorts := allUsedContainerPortsMap[protocol] + allUsedHostPorts := allUsedHostPortsMap[protocol] + var usedHostPorts [65536]bool + + var previousPort *types.PortMapping + var i int + for i = 0; i < len(ports); i++ { + if ports[i].hostPort == 0 { + // because the ports are sorted and host port 0 is last + // we can break when we hit 0 + // we will fit them in afterwards + break + } + p := types.PortMapping{ + HostIP: hostIP, + Protocol: protocol, + HostPort: ports[i].hostPort, + ContainerPort: ports[i].containerPort, + Range: ports[i].rangePort, + } + var err error + previousPort, err = joinTwoPortsToRangePortIfPossible(&portMappings, &allUsedHostPorts, + &allUsedContainerPorts, &usedHostPorts, previousPort, p) + if err != nil { + return nil, err + } + } + if previousPort != nil { + addPortToUsedPorts(&portMappings, &allUsedHostPorts, + &allUsedContainerPorts, &usedHostPorts, previousPort) + } + + // now take care of the hostPort = 0 ports + previousPort = nil + for i < len(ports) { + p := types.PortMapping{ + HostIP: hostIP, + Protocol: protocol, + ContainerPort: ports[i].containerPort, + Range: ports[i].rangePort, + } + var err error + previousPort, err = joinTwoContainerPortsToRangePortIfPossible(&portMappings, &allUsedHostPorts, + &allUsedContainerPorts, &usedHostPorts, previousPort, p) + if err != nil { + return nil, err + } + i++ + } + if previousPort != nil { + newPort, err := getRandomHostPort(&usedHostPorts, *previousPort) + if err != nil { + return nil, err + } + addPortToUsedPorts(&portMappings, &allUsedHostPorts, + &allUsedContainerPorts, &usedHostPorts, &newPort) + } + + allUsedContainerPortsMap[protocol] = allUsedContainerPorts + allUsedHostPortsMap[protocol] = allUsedHostPorts + } + } + + if len(exposePorts) > 0 { + logrus.Debugf("Adding exposed ports") + + for port, protocols := range exposePorts { + newProtocols := make([]string, 0, len(protocols)) + for _, protocol := range protocols { + if !allUsedContainerPortsMap[protocol][port] { + p := types.PortMapping{ + ContainerPort: port, + Protocol: protocol, + Range: 1, + } + allPorts := allUsedContainerPortsMap[protocol] + p, err := getRandomHostPort(&allPorts, p) + if err != nil { + return nil, err + } + portMappings = append(portMappings, p) + } else { + newProtocols = append(newProtocols, protocol) + } + } + // make sure to delete the key from the map if there are no protocols left + if len(newProtocols) == 0 { + delete(exposePorts, port) + } else { + exposePorts[port] = newProtocols + } + } + } + return portMappings, nil +} + +func appendProtocolsNoDuplicates(slice []string, protocols []string) []string { + for _, proto := range protocols { + if util.StringInSlice(proto, slice) { + continue + } + slice = append(slice, proto) + } + return slice +} + +// Make final port mappings for the container +func createPortMappings(s *specgen.SpecGenerator, imageData *libimage.ImageData) ([]types.PortMapping, map[uint16][]string, error) { + expose := make(map[uint16]string) + var err error + if imageData != nil { + expose, err = GenExposedPorts(imageData.Config.ExposedPorts) + if err != nil { + return nil, nil, err + } + } + 
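// The sort.Slice comparator in ParsePortMapping above orders mappings by host
// port while forcing hostPort 0 to the end, so explicitly requested host
// ports are reserved before random ones are assigned. A standalone
// illustration of the resulting order (tie-breaking by container port is
// omitted here):
package main

import (
	"fmt"
	"sort"
)

func main() {
	hostPorts := []uint16{0, 8080, 80, 0}
	sort.Slice(hostPorts, func(i, j int) bool {
		if hostPorts[i] == 0 {
			return false
		}
		if hostPorts[j] == 0 {
			return true
		}
		return hostPorts[i] < hostPorts[j]
	})
	fmt.Println(hostPorts) // [80 8080 0 0]
}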
+ toExpose := make(map[uint16][]string, len(s.Expose)+len(expose)) + for _, expose := range []map[uint16]string{expose, s.Expose} { + for port, proto := range expose { + if port == 0 { + return nil, nil, errors.Errorf("cannot expose 0 as it is not a valid port number") + } + protocols, err := checkProtocol(proto, false) + if err != nil { + return nil, nil, errors.Wrapf(err, "error validating protocols for exposed port %d", port) + } + toExpose[port] = appendProtocolsNoDuplicates(toExpose[port], protocols) + } + } + + publishPorts := toExpose + if !s.PublishExposedPorts { + publishPorts = nil + } + + finalMappings, err := ParsePortMapping(s.PortMappings, publishPorts) + if err != nil { + return nil, nil, err + } + return finalMappings, toExpose, nil +} + +// Check a string to ensure it is a comma-separated set of valid protocols +func checkProtocol(protocol string, allowSCTP bool) ([]string, error) { + protocols := make(map[string]struct{}) + splitProto := strings.Split(protocol, ",") + // Don't error on duplicates - just deduplicate + for _, p := range splitProto { + p = strings.ToLower(p) + switch p { + case protoTCP, "": + protocols[protoTCP] = struct{}{} + case protoUDP: + protocols[protoUDP] = struct{}{} + case protoSCTP: + if !allowSCTP { + return nil, errors.Errorf("protocol SCTP is not allowed for exposed ports") + } + protocols[protoSCTP] = struct{}{} + default: + return nil, errors.Errorf("unrecognized protocol %q in port mapping", p) + } + } + + finalProto := []string{} + for p := range protocols { + finalProto = append(finalProto, p) + } + + // This shouldn't be possible, but check anyways + if len(finalProto) == 0 { + return nil, errors.Errorf("no valid protocols specified for port mapping") + } + + return finalProto, nil +} + +func GenExposedPorts(exposedPorts map[string]struct{}) (map[uint16]string, error) { + expose := make([]string, 0, len(exposedPorts)) + for e := range exposedPorts { + expose = append(expose, e) + } + toReturn, err := specgenutil.CreateExpose(expose) + if err != nil { + return nil, errors.Wrapf(err, "unable to convert image EXPOSE") + } + return toReturn, nil +} diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/security.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security.go similarity index 93% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/security.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/security.go index a11debdb544..ec52164abf8 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/security.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security.go @@ -7,10 +7,10 @@ import ( "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/capabilities" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" @@ -146,6 +146,10 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, configSpec := g.Config configSpec.Process.Capabilities.Ambient = []string{} + + // Always unset the inheritable 
capabilities similarly to what the Linux kernel does + // They are used only when using capabilities with uid != 0. + configSpec.Process.Capabilities.Inheritable = []string{} configSpec.Process.Capabilities.Bounding = caplist user := strings.Split(s.User, ":")[0] @@ -153,7 +157,6 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, if (user == "" && s.UserNS.NSMode != specgen.KeepID) || user == "root" || user == "0" { configSpec.Process.Capabilities.Effective = caplist configSpec.Process.Capabilities.Permitted = caplist - configSpec.Process.Capabilities.Inheritable = caplist } else { mergedCaps, err := capabilities.MergeCapabilities(nil, s.CapAdd, nil) if err != nil { @@ -175,12 +178,12 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, } configSpec.Process.Capabilities.Effective = userCaps configSpec.Process.Capabilities.Permitted = userCaps - configSpec.Process.Capabilities.Inheritable = userCaps // Ambient capabilities were added to Linux 4.3. Set ambient // capabilities only when the kernel supports them. if supportAmbientCapabilities() { configSpec.Process.Capabilities.Ambient = userCaps + configSpec.Process.Capabilities.Inheritable = userCaps } } @@ -219,7 +222,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, for sysctlKey, sysctlVal := range defaultSysctls { // Ignore mqueue sysctls if --ipc=host if noUseIPC && strings.HasPrefix(sysctlKey, "fs.mqueue.") { - logrus.Infof("Sysctl %s=%s ignored in containers.conf, since IPC Namespace set to host", sysctlKey, sysctlVal) + logrus.Infof("Sysctl %s=%s ignored in containers.conf, since IPC Namespace set to %q", sysctlKey, sysctlVal, s.IpcNS.NSMode) continue } @@ -246,7 +249,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, // Ignore net sysctls if --net=host if s.NetNS.IsHost() && strings.HasPrefix(sysctlKey, "net.") { - return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since Host Namespace set to host", sysctlKey, sysctlVal) + return errors.Wrapf(define.ErrInvalidArg, "sysctl %s=%s can't be set since Network Namespace set to host", sysctlKey, sysctlVal) } // Ignore uts sysctls if --uts=host diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/storage.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go similarity index 97% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/storage.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go index de655ad7d73..f30fc4671ed 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/storage.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go @@ -11,10 +11,10 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/parse" - "github.com/containers/podman/v3/libpod" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -208,15 +208,12 @@ func getImageVolumes(ctx context.Context, img *libimage.Image, s *specgen.SpecGe return mounts, volumes, nil } - inspect, err := 
img.Inspect(ctx, false) + inspect, err := img.Inspect(ctx, nil) if err != nil { return nil, nil, errors.Wrapf(err, "error inspecting image to get image volumes") } for volume := range inspect.Config.Volumes { logrus.Debugf("Image has volume at %q", volume) - if err = parse.ValidateVolumeCtrDir(volume); err != nil { - return nil, nil, err - } cleanDest := filepath.Clean(volume) switch mode { case "", "anonymous": @@ -295,7 +292,7 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s // and append them in if we can find them. spec := ctr.Spec() if spec == nil { - return nil, nil, errors.Errorf("error retrieving container %s spec for volumes-from", ctr.ID()) + return nil, nil, errors.Errorf("retrieving container %s spec for volumes-from", ctr.ID()) } for _, mnt := range spec.Mounts { if mnt.Type != define.TypeBind { diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go similarity index 93% rename from vendor/github.com/containers/podman/v3/pkg/specgen/generate/validate.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go index b0d84825e8f..44c7818e7cb 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/generate/validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go @@ -4,10 +4,10 @@ import ( "os" "path/filepath" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/sysinfo" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/specgen" - "github.com/containers/podman/v3/utils" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/utils" "github.com/pkg/errors" ) @@ -47,10 +47,8 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error if !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") memory.Swappiness = nil - } else { - if *memory.Swappiness > 100 { - return warnings, errors.Errorf("invalid value: %v, valid memory swappiness range is 0-100", *memory.Swappiness) - } + } else if *memory.Swappiness > 100 { + return warnings, errors.Errorf("invalid value: %v, valid memory swappiness range is 0-100", *memory.Swappiness) } } if memory.Reservation != nil && !sysInfo.MemoryReservation { @@ -60,10 +58,6 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error if memory.Limit != nil && memory.Reservation != nil && *memory.Limit < *memory.Reservation { return warnings, errors.New("minimum memory limit cannot be less than memory reservation limit, see usage") } - if memory.Kernel != nil && !sysInfo.KernelMemory { - warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") - memory.Kernel = nil - } if memory.DisableOOMKiller != nil && *memory.DisableOOMKiller && !sysInfo.OomKillDisable { warnings = append(warnings, "Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") memory.DisableOOMKiller = nil diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go similarity index 54% rename from vendor/github.com/containers/podman/v3/pkg/specgen/namespaces.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go index 2f4c488113e..7a7ca2706c6 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/namespaces.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go @@ -2,12 +2,15 @@ package specgen import ( "fmt" + "net" "os" "strings" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -32,6 +35,10 @@ const ( FromPod NamespaceMode = "pod" // Private indicates the namespace is private Private NamespaceMode = "private" + // Shareable indicates the namespace is shareable + Shareable NamespaceMode = "shareable" + // None indicates the IPC namespace is created without mounting /dev/shm + None NamespaceMode = "none" // NoNetwork indicates no network namespace should // be joined. loopback should still exist. // Only used with the network namespace, invalid otherwise. @@ -48,13 +55,17 @@ const ( // of the namespace itself. // Only used with the user namespace, invalid otherwise. KeepID NamespaceMode = "keep-id" + // NoMap indicates a user namespace to keep the owner uid out + // of the namespace itself. + // Only used with the user namespace, invalid otherwise. + NoMap NamespaceMode = "no-map" + // Auto indicates to automatically create a user namespace. + // Only used with the user namespace, invalid otherwise. Auto NamespaceMode = "auto" // DefaultKernelNamespaces is a comma-separated list of default kernel // namespaces. 
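// Editor's sketch, not part of the upstream diff: how the new modes surface
// through the parse helpers added later in this file (names as defined in
// the diff, values hypothetical):
//
//	ipc, _ := specgen.ParseIPCNamespace("none")   // ipc.IsNone() == true; /dev/shm is not mounted
//	ipc, _ = specgen.ParseIPCNamespace("")        // defaults to Shareable
//	usr, _ := specgen.ParseUserNamespace("nomap") // usr.IsNoMap() == true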
- DefaultKernelNamespaces = "cgroup,ipc,net,uts" + DefaultKernelNamespaces = "ipc,net,uts" ) // Namespace describes the namespace @@ -74,6 +85,11 @@ func (n *Namespace) IsHost() bool { return n.NSMode == Host } +// IsNone returns a bool if the namespace is set to none +func (n *Namespace) IsNone() bool { + return n.NSMode == None +} + // IsBridge returns a bool if the namespace is a Bridge func (n *Namespace) IsBridge() bool { return n.NSMode == Bridge @@ -109,6 +125,11 @@ func (n *Namespace) IsKeepID() bool { return n.NSMode == KeepID } +// IsNoMap indicates the namespace is NoMap +func (n *Namespace) IsNoMap() bool { + return n.NSMode == NoMap +} + func (n *Namespace) String() string { if n.Value != "" { return fmt.Sprintf("%s:%s", n.NSMode, n.Value) @@ -121,7 +142,7 @@ func validateUserNS(n *Namespace) error { return nil } switch n.NSMode { - case Auto, KeepID: + case Auto, KeepID, NoMap: return nil } return n.validate() @@ -155,6 +176,17 @@ func validateNetNS(n *Namespace) error { return nil } +func validateIPCNS(n *Namespace) error { + if n == nil { + return nil + } + switch n.NSMode { + case Shareable, None: + return nil + } + return n.validate() +} + // Validate perform simple validation on the namespace to make sure it is not // invalid from the get-go func (n *Namespace) validate() error { @@ -199,14 +231,14 @@ func ParseNamespace(ns string) (Namespace, error) { case strings.HasPrefix(ns, "ns:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, errors.Errorf("must provide a path to a namespace when specifying ns:") + return toReturn, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"") } toReturn.NSMode = Path toReturn.Value = split[1] case strings.HasPrefix(ns, "container:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, errors.Errorf("must provide name or ID or a container when specifying container:") + return toReturn, errors.Errorf("must provide name or ID or a container when specifying \"container:\"") } toReturn.NSMode = FromContainer toReturn.Value = split[1] @@ -234,7 +266,7 @@ func ParseCgroupNamespace(ns string) (Namespace, error) { case "private", "": toReturn.NSMode = Private default: - return toReturn, errors.Errorf("unrecognized namespace mode %s passed", ns) + return toReturn, errors.Errorf("unrecognized cgroup namespace mode %s passed", ns) } } else { toReturn.NSMode = Host @@ -242,6 +274,21 @@ func ParseCgroupNamespace(ns string) (Namespace, error) { return toReturn, nil } +// ParseIPCNamespace parses a ipc namespace specification in string +// form. +func ParseIPCNamespace(ns string) (Namespace, error) { + toReturn := Namespace{} + switch { + case ns == "shareable", ns == "": + toReturn.NSMode = Shareable + return toReturn, nil + case ns == "none": + toReturn.NSMode = None + return toReturn, nil + } + return ParseNamespace(ns) +} + // ParseUserNamespace parses a user namespace specification in string // form. func ParseUserNamespace(ns string) (Namespace, error) { @@ -261,6 +308,9 @@ func ParseUserNamespace(ns string) (Namespace, error) { case ns == "keep-id": toReturn.NSMode = KeepID return toReturn, nil + case ns == "nomap": + toReturn.NSMode = NoMap + return toReturn, nil case ns == "": toReturn.NSMode = Host return toReturn, nil @@ -271,9 +321,9 @@ func ParseUserNamespace(ns string) (Namespace, error) { // ParseNetworkNamespace parses a network namespace specification in string // form. // Returns a namespace and (optionally) a list of CNI networks to join. 
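// Editor's sketch (hypothetical network names, not upstream code): the v4
// signature below returns per-network options keyed by network name in
// place of the old flat []string of CNI network names:
//
//	ns, nets, err := specgen.ParseNetworkNamespace("mynet1,mynet2", false)
//	// on success: ns.NSMode == specgen.Bridge and
//	// nets == map[string]types.PerNetworkOptions{"mynet1": {}, "mynet2": {}}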
-func ParseNetworkNamespace(ns string, rootlessDefaultCNI bool) (Namespace, []string, error) { +func ParseNetworkNamespace(ns string, rootlessDefaultCNI bool) (Namespace, map[string]types.PerNetworkOptions, error) { toReturn := Namespace{} - var cniNetworks []string + networks := make(map[string]types.PerNetworkOptions) // Net defaults to Slirp on rootless switch { case ns == string(Slirp), strings.HasPrefix(ns, string(Slirp)+":"): @@ -299,42 +349,188 @@ func ParseNetworkNamespace(ns string, rootlessDefaultCNI bool) (Namespace, []str case strings.HasPrefix(ns, "ns:"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, nil, errors.Errorf("must provide a path to a namespace when specifying ns:") + return toReturn, nil, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"") } toReturn.NSMode = Path toReturn.Value = split[1] case strings.HasPrefix(ns, string(FromContainer)+":"): split := strings.SplitN(ns, ":", 2) if len(split) != 2 { - return toReturn, nil, errors.Errorf("must provide name or ID or a container when specifying container:") + return toReturn, nil, errors.Errorf("must provide name or ID or a container when specifying \"container:\"") } toReturn.NSMode = FromContainer toReturn.Value = split[1] default: // Assume we have been given a list of CNI networks. // Which only works in bridge mode, so set that. - cniNetworks = strings.Split(ns, ",") + networkList := strings.Split(ns, ",") + for _, net := range networkList { + networks[net] = types.PerNetworkOptions{} + } + toReturn.NSMode = Bridge } - return toReturn, cniNetworks, nil + return toReturn, networks, nil } -func ParseNetworkString(network string) (Namespace, []string, map[string][]string, error) { +// ParseNetworkFlag parses a network string slice into the network options +// If the input is nil or empty it will use the default setting from containers.conf +func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) { var networkOptions map[string][]string - parts := strings.SplitN(network, ":", 2) + // by default we try to use the containers.conf setting + // if we get at least one value use this instead + ns := containerConfig.Containers.NetNS + if len(networks) > 0 { + ns = networks[0] + } - ns, cniNets, err := ParseNetworkNamespace(network, containerConfig.Containers.RootlessNetworking == "cni") - if err != nil { - return Namespace{}, nil, nil, err + toReturn := Namespace{} + podmanNetworks := make(map[string]types.PerNetworkOptions) + + switch { + case ns == string(Slirp), strings.HasPrefix(ns, string(Slirp)+":"): + parts := strings.SplitN(ns, ":", 2) + if len(parts) > 1 { + networkOptions = make(map[string][]string) + networkOptions[parts[0]] = strings.Split(parts[1], ",") + } + toReturn.NSMode = Slirp + case ns == string(FromPod): + toReturn.NSMode = FromPod + case ns == "" || ns == string(Default) || ns == string(Private): + // Net defaults to Slirp on rootless + if rootless.IsRootless() { + toReturn.NSMode = Slirp + break + } + // if root we use bridge + fallthrough + case ns == string(Bridge), strings.HasPrefix(ns, string(Bridge)+":"): + toReturn.NSMode = Bridge + parts := strings.SplitN(ns, ":", 2) + netOpts := types.PerNetworkOptions{} + if len(parts) > 1 { + var err error + netOpts, err = parseBridgeNetworkOptions(parts[1]) + if err != nil { + return toReturn, nil, nil, err + } + } + // we have to set the special default network name here + podmanNetworks["default"] = netOpts + + case ns == string(NoNetwork): + 
toReturn.NSMode = NoNetwork + case ns == string(Host): + toReturn.NSMode = Host + case strings.HasPrefix(ns, "ns:"): + split := strings.SplitN(ns, ":", 2) + if len(split) != 2 { + return toReturn, nil, nil, errors.Errorf("must provide a path to a namespace when specifying \"ns:\"") + } + toReturn.NSMode = Path + toReturn.Value = split[1] + case strings.HasPrefix(ns, string(FromContainer)+":"): + split := strings.SplitN(ns, ":", 2) + if len(split) != 2 { + return toReturn, nil, nil, errors.Errorf("must provide name or ID or a container when specifying \"container:\"") + } + toReturn.NSMode = FromContainer + toReturn.Value = split[1] + default: + // we should have a normal network + parts := strings.SplitN(ns, ":", 2) + if len(parts) == 1 { + // Assume we have been given a comma separated list of networks for backwards compat. + networkList := strings.Split(ns, ",") + for _, net := range networkList { + podmanNetworks[net] = types.PerNetworkOptions{} + } + } else { + if parts[0] == "" { + return toReturn, nil, nil, errors.New("network name cannot be empty") + } + netOpts, err := parseBridgeNetworkOptions(parts[1]) + if err != nil { + return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0]) + } + podmanNetworks[parts[0]] = netOpts + } + + // networks need bridge mode + toReturn.NSMode = Bridge + } + + if len(networks) > 1 { + if !toReturn.IsBridge() { + return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "cannot set multiple networks without bridge network mode, selected mode %s", toReturn.NSMode) + } + + for _, network := range networks[1:] { + parts := strings.SplitN(network, ":", 2) + if parts[0] == "" { + return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "network name cannot be empty") + } + if util.StringInSlice(parts[0], []string{string(Bridge), string(Slirp), string(FromPod), string(NoNetwork), + string(Default), string(Private), string(Path), string(FromContainer), string(Host)}) { + return toReturn, nil, nil, errors.Wrapf(define.ErrInvalidArg, "can only set extra network names, selected mode %s conflicts with bridge", parts[0]) + } + netOpts := types.PerNetworkOptions{} + if len(parts) > 1 { + var err error + netOpts, err = parseBridgeNetworkOptions(parts[1]) + if err != nil { + return toReturn, nil, nil, errors.Wrapf(err, "invalid option for network %s", parts[0]) + } + } + podmanNetworks[parts[0]] = netOpts + } + } + + return toReturn, podmanNetworks, networkOptions, nil +} + +func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) { + netOpts := types.PerNetworkOptions{} + if len(opts) == 0 { + return netOpts, nil } + allopts := strings.Split(opts, ",") + for _, opt := range allopts { + split := strings.SplitN(opt, "=", 2) + switch split[0] { + case "ip", "ip6": + ip := net.ParseIP(split[1]) + if ip == nil { + return netOpts, errors.Errorf("invalid ip address %q", split[1]) + } + netOpts.StaticIPs = append(netOpts.StaticIPs, ip) + + case "mac": + mac, err := net.ParseMAC(split[1]) + if err != nil { + return netOpts, err + } + netOpts.StaticMAC = types.HardwareAddr(mac) + + case "alias": + if split[1] == "" { + return netOpts, errors.New("alias cannot be empty") + } + netOpts.Aliases = append(netOpts.Aliases, split[1]) + + case "interface_name": + if split[1] == "" { + return netOpts, errors.New("interface_name cannot be empty") + } + netOpts.InterfaceName = split[1] - if len(parts) > 1 { - networkOptions = make(map[string][]string) - networkOptions[parts[0]] = strings.Split(parts[1], ",") - cniNets = nil + 
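// Editor's sketch (hypothetical values, not upstream code): the option
// string handled by this switch is everything after the first colon in
// --network <name>:<opts>, for example:
//
//	opts, err := parseBridgeNetworkOptions("ip=10.88.0.10,alias=web,interface_name=eth1")
//	// on success opts.StaticIPs, opts.Aliases and opts.InterfaceName are
//	// populated; any unrecognized key falls through to the error below.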
default: + return netOpts, errors.Errorf("unknown bridge network option: %s", split[0]) + } } - return ns, cniNets, networkOptions, nil + return netOpts, nil } func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *generate.Generator) (string, error) { @@ -364,20 +560,41 @@ func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *gene g.SetProcessUID(uint32(uid)) g.SetProcessGID(uint32(gid)) user = fmt.Sprintf("%d:%d", uid, gid) - fallthrough - case Private: - if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil { + if err := privateUserNamespace(idmappings, g); err != nil { return user, err } - if idmappings == nil || (len(idmappings.UIDMap) == 0 && len(idmappings.GIDMap) == 0) { - return user, errors.Errorf("must provide at least one UID or GID mapping to configure a user namespace") + case NoMap: + mappings, uid, gid, err := util.GetNoMapMapping() + if err != nil { + return user, err } - for _, uidmap := range idmappings.UIDMap { - g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) + idmappings = mappings + g.SetProcessUID(uint32(uid)) + g.SetProcessGID(uint32(gid)) + user = fmt.Sprintf("%d:%d", uid, gid) + if err := privateUserNamespace(idmappings, g); err != nil { + return user, err } - for _, gidmap := range idmappings.GIDMap { - g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size)) + case Private: + if err := privateUserNamespace(idmappings, g); err != nil { + return user, err } } return user, nil } + +func privateUserNamespace(idmappings *storage.IDMappingOptions, g *generate.Generator) error { + if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil { + return err + } + if idmappings == nil || (len(idmappings.UIDMap) == 0 && len(idmappings.GIDMap) == 0) { + return errors.Errorf("must provide at least one UID or GID mapping to configure a user namespace") + } + for _, uidmap := range idmappings.UIDMap { + g.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size)) + } + for _, gidmap := range idmappings.GIDMap { + g.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size)) + } + return nil +} diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/pod_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go similarity index 81% rename from vendor/github.com/containers/podman/v3/pkg/specgen/pod_validate.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go index bca7b6dbe7f..8d971a25e20 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/pod_validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go @@ -1,8 +1,7 @@ package specgen import ( - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/util" + "github.com/containers/podman/v4/pkg/util" "github.com/pkg/errors" ) @@ -19,15 +18,6 @@ func exclusivePodOptions(opt1, opt2 string) error { // Validate verifies the input is valid func (p *PodSpecGenerator) Validate() error { - if rootless.IsRootless() && len(p.CNINetworks) == 0 { - if p.StaticIP != nil { - return ErrNoStaticIPRootless - } - if p.StaticMAC != nil { - return ErrNoStaticMACRootless - } - } - // PodBasicConfig if p.NoInfra { if len(p.InfraCommand) > 0 { @@ -52,11 +42,10 @@ func (p *PodSpecGenerator) Validate() error { if p.NetNS.NSMode != Default && p.NetNS.NSMode != "" { return errors.New("NoInfra and 
network modes cannot be used together") } - if p.StaticIP != nil { - return exclusivePodOptions("NoInfra", "StaticIP") - } - if p.StaticMAC != nil { - return exclusivePodOptions("NoInfra", "StaticMAC") + // Note that networks might be set when --ip or --mac was set + // so we need to check that no networks are set without the infra + if len(p.Networks) > 0 { + return errors.New("cannot set networks options without infra container") } if len(p.DNSOption) > 0 { return exclusivePodOptions("NoInfra", "DNSOption") @@ -78,10 +67,8 @@ func (p *PodSpecGenerator) Validate() error { if len(p.PortMappings) > 0 { return errors.New("PortMappings can only be used with Bridge or slirp4netns networking") } - if len(p.CNINetworks) > 0 { - return errors.New("CNINetworks can only be used with Bridge mode networking") - } } + if p.NoManageResolvConf { if len(p.DNSServer) > 0 { return exclusivePodOptions("NoManageResolvConf", "DNSServer") diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/podspecgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go similarity index 87% rename from vendor/github.com/containers/podman/v3/pkg/specgen/podspecgen.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go index 7713ea26c07..759caa0c0c1 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/podspecgen.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go @@ -3,7 +3,7 @@ package specgen import ( "net" - "github.com/containers/podman/v3/libpod/network/types" + "github.com/containers/common/libnetwork/types" spec "github.com/opencontainers/runtime-spec/specs-go" ) @@ -63,6 +63,8 @@ type PodBasicConfig struct { // also be used by some tools that wish to recreate the pod // (e.g. `podman generate systemd --new`). // Optional. + // ShareParent determines if all containers in the pod will share the pod's cgroup as the cgroup parent + ShareParent *bool `json:"share_parent,omitempty"` PodCreateCommand []string `json:"pod_create_command,omitempty"` // Pid sets the process id namespace of the pod // Optional (defaults to private if unset). This sets the PID namespace of the infra container @@ -74,6 +76,8 @@ type PodBasicConfig struct { Userns Namespace `json:"userns,omitempty"` // Devices contains user specified Devices to be added to the Pod Devices []string `json:"pod_devices,omitempty"` + // Sysctl sets kernel parameters for the pod + Sysctl map[string]string `json:"sysctl,omitempty"` } // PodNetworkConfig contains networking configuration for a pod. @@ -86,32 +90,26 @@ type PodNetworkConfig struct { // Defaults to Bridge as root and Slirp as rootless. // Mandatory. NetNS Namespace `json:"netns,omitempty"` - // StaticIP sets a static IP for the infra container. As the infra - // container's network is used for the entire pod by default, this will - // thus be a static IP for the whole pod. - // Only available if NetNS is set to Bridge (the default for root). - // As such, conflicts with NoInfra=true by proxy. - // Optional. - StaticIP *net.IP `json:"static_ip,omitempty"` - // StaticMAC sets a static MAC for the infra container. As the infra - // container's network is used for the entire pod by default, this will - // thus be a static MAC for the entire pod. - // Only available if NetNS is set to Bridge (the default for root). - // As such, conflicts with NoInfra=true by proxy. - // Optional. - StaticMAC *net.HardwareAddr `json:"static_mac,omitempty"` // PortMappings is a set of ports to map into the infra container. 
// As, by default, containers share their network with the infra // container, this will forward the ports to the entire pod. // Only available if NetNS is set to Bridge or Slirp. // Optional. PortMappings []types.PortMapping `json:"portmappings,omitempty"` - // CNINetworks is a list of CNI networks that the infra container will - // join. As, by default, containers share their network with the infra - // container, these networks will effectively be joined by the - // entire pod. - // Only available when NetNS is set to Bridge, the default for root. - // Optional. + // Map of networks names to ids the container should join to. + // You can request additional settings for each network, you can + // set network aliases, static ips, static mac address and the + // network interface name for this container on the specific network. + // If the map is empty and the bridge network mode is set the container + // will be joined to the default network. + Networks map[string]types.PerNetworkOptions + // CNINetworks is a list of CNI networks to join the container to. + // If this list is empty, the default CNI network will be joined + // instead. If at least one entry is present, we will not join the + // default network (unless it is part of this list). + // Only available if NetNS is set to bridge. + // Optional. + // Deprecated: as of podman 4.0 use "Networks" instead. CNINetworks []string `json:"cni_networks,omitempty"` // NoManageResolvConf indicates that /etc/resolv.conf should not be // managed by the pod. Instead, each container will create and manage a @@ -187,7 +185,7 @@ type PodStorageConfig struct { // PodCgroupConfig contains configuration options about a pod's cgroups. // This will be expanded in future updates to pods. type PodCgroupConfig struct { - // CgroupParent is the parent for the CGroup that the pod will create. + // CgroupParent is the parent for the Cgroup that the pod will create. // This pod cgroup will, in turn, be the default cgroup parent for all // containers in the pod. // Optional. @@ -202,6 +200,7 @@ type PodSpecGenerator struct { PodCgroupConfig PodResourceConfig PodStorageConfig + PodSecurityConfig InfraContainerSpec *SpecGenerator `json:"-"` } @@ -216,6 +215,10 @@ type PodResourceConfig struct { ThrottleReadBpsDevice map[string]spec.LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"` } +type PodSecurityConfig struct { + SecurityOpt []string `json:"security_opt,omitempty"` +} + // NewPodSpecGenerator creates a new pod spec func NewPodSpecGenerator() *PodSpecGenerator { return &PodSpecGenerator{} diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/specgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go similarity index 89% rename from vendor/github.com/containers/podman/v3/pkg/specgen/specgen.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go index dbb66929195..79e20667b12 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/specgen.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go @@ -6,8 +6,8 @@ import ( "syscall" "github.com/containers/common/libimage" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/manifest" - nettypes "github.com/containers/podman/v3/libpod/network/types" "github.com/containers/storage/types" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -152,6 +152,9 @@ type ContainerBasicConfig struct { // Conflicts with UtsNS if UtsNS is not set to private. // Optional. 
Hostname string `json:"hostname,omitempty"` + // HostUsers is a list of host usernames or UIDs to add to the container + // /etc/passwd file + HostUsers []string `json:"hostusers,omitempty"` + // Sysctl sets kernel parameters for the container Sysctl map[string]string `json:"sysctl,omitempty"` // Remove indicates if the container should be removed once it has been started @@ -194,6 +197,17 @@ type ContainerBasicConfig struct { // The execution domain system allows Linux to provide limited support // for binaries compiled under other UNIX-like operating systems. Personality *spec.LinuxPersonality `json:"personality,omitempty"` + // UnsetEnv unsets the specified default environment variables from the image or from builtin or containers.conf + // Optional. + UnsetEnv []string `json:"unsetenv,omitempty"` + // UnsetEnvAll unsets all default environment variables from the image or from builtin + // or containers.conf + // Optional. + UnsetEnvAll bool `json:"unsetenvall,omitempty"` + // Passwd is a container run option that determines if we are validating users/groups before running the container + Passwd *bool `json:"manage_password,omitempty"` + // PasswdEntry specifies arbitrary data to append to a file. + PasswdEntry string `json:"passwd_entry,omitempty"` } // ContainerStorageConfig contains information on the storage configuration of a @@ -211,7 +225,7 @@ type ContainerStorageConfig struct { // Conflicts with Image. // At least one of Image or Rootfs must be specified. Rootfs string `json:"rootfs,omitempty"` - // RootfsOverlay tells if rootfs is actuall an overlay on top of base path + // RootfsOverlay tells if rootfs is actually an overlay on top of base path RootfsOverlay bool `json:"rootfs_overlay,omitempty"` // ImageVolumeMode indicates how image volumes will be created. // Supported modes are "ignore" (do not create), "tmpfs" (create as @@ -252,9 +266,9 @@ type ContainerStorageConfig struct { // Devices are devices that will be added to the container. // Optional. Devices []spec.LinuxDevice `json:"devices,omitempty"` - // DeviceCGroupRule are device cgroup rules that allow containers + // DeviceCgroupRule are device cgroup rules that allow containers // to use additional types of devices. - DeviceCGroupRule []spec.LinuxDeviceCgroup `json:"device_cgroup_rule,omitempty"` + DeviceCgroupRule []spec.LinuxDeviceCgroup `json:"device_cgroup_rule,omitempty"` // DevicesFrom is a way to ensure your container inherits device specific information from another container DevicesFrom []string `json:"devices_from,omitempty"` // HostDeviceList is used to recreate the mounted device on inherited containers @@ -272,6 +286,13 @@ type ContainerStorageConfig struct { // If unset, the default, /, will be used. // Optional. WorkDir string `json:"work_dir,omitempty"` + // Create the working directory if it doesn't exist. + // If unset, it doesn't create it. + // Optional. + CreateWorkingDir bool `json:"create_working_dir,omitempty"` + // StorageOpts is the container's storage options + // Optional. + StorageOpts map[string]string `json:"storage_opts,omitempty"` // RootfsPropagation is the rootfs propagation mode for the container. // If not set, the default of rslave will be used. // Optional. @@ -282,6 +303,10 @@ type ContainerStorageConfig struct { // Volatile specifies whether the container storage can be optimized // at the cost of not syncing all the dirty files in memory. 
Volatile bool `json:"volatile,omitempty"` + // ChrootDirs is an additional set of directories that need to be + // treated as root directories. Standard bind mounts will be mounted + // into paths relative to these directories. + ChrootDirs []string `json:"chroot_directories,omitempty"` } // ContainerSecurityConfig is a container's security features, including @@ -371,7 +396,7 @@ type ContainerCgroupConfig struct { // CgroupsMode sets a policy for how cgroups will be created in the // container, including the ability to disable creation entirely. CgroupsMode string `json:"cgroups_mode,omitempty"` - // CgroupParent is the container's CGroup parent. + // CgroupParent is the container's Cgroup parent. // If not set, the default for the current cgroup driver will be used. // Optional. CgroupParent string `json:"cgroup_parent,omitempty"` @@ -380,25 +405,10 @@ type ContainerCgroupConfig struct { // ContainerNetworkConfig contains information on a container's network // configuration. type ContainerNetworkConfig struct { - // Aliases are a list of network-scoped aliases for container - // Optional - Aliases map[string][]string `json:"aliases"` // NetNS is the configuration to use for the container's network // namespace. // Mandatory. NetNS Namespace `json:"netns,omitempty"` - // StaticIP is the a IPv4 address of the container. - // Only available if NetNS is set to Bridge. - // Optional. - StaticIP *net.IP `json:"static_ip,omitempty"` - // StaticIPv6 is a static IPv6 address to set in the container. - // Only available if NetNS is set to Bridge. - // Optional. - StaticIPv6 *net.IP `json:"static_ipv6,omitempty"` - // StaticMAC is a static MAC address to set in the container. - // Only available if NetNS is set to bridge. - // Optional. - StaticMAC *net.HardwareAddr `json:"static_mac,omitempty"` // PortBindings is a set of ports to map into the container. // Only available if NetNS is set to bridge or slirp. // Optional. @@ -419,12 +429,20 @@ type ContainerNetworkConfig struct { // PublishExposedPorts is set. // Optional. Expose map[uint16]string `json:"expose,omitempty"` + // Map of networks names or ids that the container should join. + // You can request additional settings for each network, you can + // set network aliases, static ips, static mac address and the + // network interface name for this container on the specific network. + // If the map is empty and the bridge network mode is set the container + // will be joined to the default network. + Networks map[string]nettypes.PerNetworkOptions // CNINetworks is a list of CNI networks to join the container to. // If this list is empty, the default CNI network will be joined // instead. If at least one entry is present, we will not join the // default network (unless it is part of this list). // Only available if NetNS is set to bridge. // Optional. + // Deprecated: as of podman 4.0 use "Networks" instead. CNINetworks []string `json:"cni_networks,omitempty"` // UseImageResolvConf indicates that resolv.conf should not be managed // by Podman, but instead sourced from the image. @@ -451,7 +469,13 @@ type ContainerNetworkConfig struct { // UseImageHosts indicates that /etc/hosts should not be managed by // Podman, and instead sourced from the image. // Conflicts with HostAdd. - UseImageHosts bool `json:"use_image_hosts,omitempty"` + // Do not set omitempty here, if this is false it should be set to not get + // the server default. 
+ // Ideally this would be a pointer so we could differentiate between an + // explicitly false/true and unset (containers.conf default). However + // specgen is stable so we can not change this right now. + // TODO (5.0): change to pointer + UseImageHosts bool `json:"use_image_hosts"` // HostAdd is a set of hosts which will be added to the container's // /etc/hosts file. // Conflicts with UseImageHosts. @@ -532,6 +556,7 @@ func (s *SpecGenerator) GetImage() (*libimage.Image, string) { type Secret struct { Source string + Target string UID uint32 GID uint32 Mode uint32 @@ -551,11 +576,11 @@ func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator { csc := ContainerStorageConfig{} if rootfs { csc.Rootfs = arg - // check if rootfs is actually overlayed - parts := strings.SplitN(csc.Rootfs, ":", 2) - if len(parts) > 1 && parts[1] == "O" { + // check if rootfs should use overlay + lastColonIndex := strings.LastIndex(csc.Rootfs, ":") + if lastColonIndex != -1 && lastColonIndex+1 < len(csc.Rootfs) && csc.Rootfs[lastColonIndex+1:] == "O" { csc.RootfsOverlay = true - csc.Rootfs = parts[0] + csc.Rootfs = csc.Rootfs[:lastColonIndex] } } else { csc.Image = arg diff --git a/vendor/github.com/containers/podman/v3/pkg/specgen/volumes.go b/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go similarity index 89% rename from vendor/github.com/containers/podman/v3/pkg/specgen/volumes.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go index eca8c0c35c9..b26666df3db 100644 --- a/vendor/github.com/containers/podman/v3/pkg/specgen/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go @@ -65,7 +65,7 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na err error ) - splitVol := strings.Split(vol, ":") + splitVol := SplitVolumeString(vol) if len(splitVol) > 3 { return nil, nil, nil, errors.Wrapf(volumeFormatErr, vol) } @@ -93,7 +93,7 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na } } - if strings.HasPrefix(src, "/") || strings.HasPrefix(src, ".") { + if strings.HasPrefix(src, "/") || strings.HasPrefix(src, ".") || isHostWinPath(src) { // This is not a named volume overlayFlag := false chownFlag := false @@ -152,3 +152,26 @@ func GenVolumeMounts(volumeFlag []string) (map[string]spec.Mount, map[string]*Na return mounts, volumes, overlayVolumes, nil } + +// Splits a volume string, accounting for Win drive paths +// when running as a WSL linux guest or Windows client +func SplitVolumeString(vol string) []string { + parts := strings.Split(vol, ":") + if !shouldResolveWinPaths() { + return parts + } + + // Skip extended marker prefix if present + n := 0 + if strings.HasPrefix(vol, `\\?\`) { + n = 4 + } + + if hasWinDriveScheme(vol, n) { + first := parts[0] + ":" + parts[1] + parts = parts[1:] + parts[0] = first + } + + return parts +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go new file mode 100644 index 00000000000..0df4ebdd7b7 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go @@ -0,0 +1,60 @@ +package specgen + +import ( + "fmt" + "strings" + "unicode" + + "github.com/pkg/errors" +) + +func isHostWinPath(path string) bool { + return shouldResolveWinPaths() && strings.HasPrefix(path, `\\`) || hasWinDriveScheme(path, 0) || winPathExists(path) +} + +func hasWinDriveScheme(path string, start int) bool { + if len(path) < start+2 || path[start+1] != ':' { + return false 
+ } + + drive := rune(path[start]) + return drive < unicode.MaxASCII && unicode.IsLetter(drive) +} + +// Converts a Windows path to a WSL guest path if local env is a WSL linux guest or this is a Windows client. +func ConvertWinMountPath(path string) (string, error) { + if !shouldResolveWinPaths() { + return path, nil + } + + if strings.HasPrefix(path, "/") { + // Handle /[driveletter]/windows/path form (e.g. c:\Users\bar == /c/Users/bar) + if len(path) > 2 && path[2] == '/' && shouldResolveUnixWinVariant(path) { + drive := unicode.ToLower(rune(path[1])) + if unicode.IsLetter(drive) && drive <= unicode.MaxASCII { + return fmt.Sprintf("/mnt/%c/%s", drive, path[3:]), nil + } + } + + // unix path - pass through + return path, nil + } + + // Convert remote win client relative paths to absolute + path = resolveRelativeOnWindows(path) + + // Strip extended marker prefix if present + path = strings.TrimPrefix(path, `\\?\`) + + // Drive installed via wsl --mount + switch { + case strings.HasPrefix(path, `\\.\`): + path = "/mnt/wsl/" + path[4:] + case len(path) > 1 && path[1] == ':': + path = "/mnt/" + strings.ToLower(path[0:1]) + path[2:] + default: + return path, errors.New("unsupported UNC path") + } + + return strings.ReplaceAll(path, `\`, "/"), nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go new file mode 100644 index 00000000000..f42ac76399d --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go @@ -0,0 +1,24 @@ +package specgen + +import ( + "os" + + "github.com/containers/common/pkg/machine" +) + +func shouldResolveWinPaths() bool { + return machine.MachineHostType() == "wsl" +} + +func shouldResolveUnixWinVariant(path string) bool { + _, err := os.Stat(path) + return err != nil +} + +func resolveRelativeOnWindows(path string) string { + return path +} + +func winPathExists(path string) bool { + return false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go new file mode 100644 index 00000000000..4cd008fddcd --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go @@ -0,0 +1,20 @@ +//go:build !linux && !windows +// +build !linux,!windows + +package specgen + +func shouldResolveWinPaths() bool { + return false +} + +func shouldResolveUnixWinVariant(path string) bool { + return false +} + +func resolveRelativeOnWindows(path string) string { + return path +} + +func winPathExists(path string) bool { + return false +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go new file mode 100644 index 00000000000..c6aad314a22 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go @@ -0,0 +1,30 @@ +package specgen + +import ( + "github.com/sirupsen/logrus" + "os" + "path/filepath" +) + +func shouldResolveUnixWinVariant(path string) bool { + return true +} + +func shouldResolveWinPaths() bool { + return true +} + +func resolveRelativeOnWindows(path string) string { + ret, err := filepath.Abs(path) + if err != nil { + logrus.Debugf("problem resolving possible relative path %q: %s", path, err.Error()) + return path + } + + return ret +} + +func winPathExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} diff --git 
a/vendor/github.com/containers/podman/v4/pkg/specgenutil/createparse.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/createparse.go new file mode 100644 index 00000000000..fb5f9c351b2 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/createparse.go @@ -0,0 +1,37 @@ +package specgenutil + +import ( + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/pkg/errors" +) + +// validate determines if the flags and values given by the user are valid. Things checked +// by validate must not need any state information on the flag (i.e. changed) +func validate(c *entities.ContainerCreateOptions) error { + var () + if c.Rm && (c.Restart != "" && c.Restart != "no" && c.Restart != "on-failure") { + return errors.Errorf(`the --rm option conflicts with --restart, when the restartPolicy is not "" and "no"`) + } + + if _, err := config.ParsePullPolicy(c.Pull); err != nil { + return err + } + + var imageVolType = map[string]string{ + "bind": "", + "tmpfs": "", + "ignore": "", + } + if _, ok := imageVolType[c.ImageVolume]; !ok { + switch { + case c.IsInfra: + c.ImageVolume = "bind" + case c.IsClone: // the image volume type will be deduced later from the container we are cloning + return nil + default: + return errors.Errorf("invalid image-volume type %q. Pick one of bind, tmpfs, or ignore", c.ImageVolume) + } + } + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/ports.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/ports.go new file mode 100644 index 00000000000..6cc4de1ed44 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/ports.go @@ -0,0 +1,22 @@ +package specgenutil + +import ( + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" +) + +func verifyExpose(expose []string) error { + // add the expose ports from the user (--expose) + // can be single or a range + for _, expose := range expose { + // support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>] + _, port := nat.SplitProtoPort(expose) + // parse the start and end port and create a sequence of ports to expose + // if exposing a single port, the start and end port are the same + _, _, err := nat.ParsePortRange(port) + if err != nil { + return errors.Wrapf(err, "invalid range format for --expose: %s", expose) + } + } + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go new file mode 100644 index 00000000000..9cb2f200b0f --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go @@ -0,0 +1,1147 @@ +package specgenutil + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/manifest" + "github.com/containers/podman/v4/cmd/podman/parse" + "github.com/containers/podman/v4/libpod/define" + ann "github.com/containers/podman/v4/pkg/annotations" + "github.com/containers/podman/v4/pkg/domain/entities" + envLib "github.com/containers/podman/v4/pkg/env" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/specgen" + systemdDefine "github.com/containers/podman/v4/pkg/systemd/define" + "github.com/containers/podman/v4/pkg/util" + "github.com/docker/docker/opts" + "github.com/docker/go-units" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func getCPULimits(c 
*entities.ContainerCreateOptions) *specs.LinuxCPU { + cpu := &specs.LinuxCPU{} + hasLimits := false + + if c.CPUS > 0 { + period, quota := util.CoresToPeriodAndQuota(c.CPUS) + + cpu.Period = &period + cpu.Quota = &quota + hasLimits = true + } + if c.CPUShares > 0 { + cpu.Shares = &c.CPUShares + hasLimits = true + } + if c.CPUPeriod > 0 { + cpu.Period = &c.CPUPeriod + hasLimits = true + } + if c.CPUSetCPUs != "" { + cpu.Cpus = c.CPUSetCPUs + hasLimits = true + } + if c.CPUSetMems != "" { + cpu.Mems = c.CPUSetMems + hasLimits = true + } + if c.CPUQuota > 0 { + cpu.Quota = &c.CPUQuota + hasLimits = true + } + if c.CPURTPeriod > 0 { + cpu.RealtimePeriod = &c.CPURTPeriod + hasLimits = true + } + if c.CPURTRuntime > 0 { + cpu.RealtimeRuntime = &c.CPURTRuntime + hasLimits = true + } + + if !hasLimits { + return nil + } + return cpu +} + +func getIOLimits(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) (*specs.LinuxBlockIO, error) { + var err error + io := &specs.LinuxBlockIO{} + hasLimits := false + if b := c.BlkIOWeight; len(b) > 0 { + u, err := strconv.ParseUint(b, 10, 16) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for blkio-weight") + } + nu := uint16(u) + io.Weight = &nu + hasLimits = true + } + + if len(c.BlkIOWeightDevice) > 0 { + if s.WeightDevice, err = parseWeightDevices(c.BlkIOWeightDevice); err != nil { + return nil, err + } + hasLimits = true + } + + if bps := c.DeviceReadBPs; len(bps) > 0 { + if s.ThrottleReadBpsDevice, err = parseThrottleBPSDevices(bps); err != nil { + return nil, err + } + hasLimits = true + } + + if bps := c.DeviceWriteBPs; len(bps) > 0 { + if s.ThrottleWriteBpsDevice, err = parseThrottleBPSDevices(bps); err != nil { + return nil, err + } + hasLimits = true + } + + if iops := c.DeviceReadIOPs; len(iops) > 0 { + if s.ThrottleReadIOPSDevice, err = parseThrottleIOPsDevices(iops); err != nil { + return nil, err + } + hasLimits = true + } + + if iops := c.DeviceWriteIOPs; len(iops) > 0 { + if s.ThrottleWriteIOPSDevice, err = parseThrottleIOPsDevices(iops); err != nil { + return nil, err + } + hasLimits = true + } + + if !hasLimits { + return nil, nil + } + return io, nil +} + +func LimitToSwap(memory *specs.LinuxMemory, swap string, ml int64) { + if ml > 0 { + memory.Limit = &ml + if swap == "" { + limit := 2 * ml + memory.Swap = &(limit) + } + } +} + +func getMemoryLimits(c *entities.ContainerCreateOptions) (*specs.LinuxMemory, error) { + var err error + memory := &specs.LinuxMemory{} + hasLimits := false + if m := c.Memory; len(m) > 0 { + ml, err := units.RAMInBytes(m) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory") + } + LimitToSwap(memory, c.MemorySwap, ml) + hasLimits = true + } + if m := c.MemoryReservation; len(m) > 0 { + mr, err := units.RAMInBytes(m) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory") + } + memory.Reservation = &mr + hasLimits = true + } + if m := c.MemorySwap; len(m) > 0 { + var ms int64 + // only set memory swap if it was set + // -1 indicates unlimited + if m != "-1" { + ms, err = units.RAMInBytes(m) + memory.Swap = &ms + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory") + } + hasLimits = true + } + } + if c.MemorySwappiness >= 0 { + swappiness := uint64(c.MemorySwappiness) + memory.Swappiness = &swappiness + hasLimits = true + } + if c.OOMKillDisable { + memory.DisableOOMKiller = &c.OOMKillDisable + hasLimits = true + } + if !hasLimits { + return nil, nil + } + return memory, nil +} + +func setNamespaces(s 
*specgen.SpecGenerator, c *entities.ContainerCreateOptions) error { + var err error + + if c.PID != "" { + s.PidNS, err = specgen.ParseNamespace(c.PID) + if err != nil { + return err + } + } + if c.IPC != "" { + s.IpcNS, err = specgen.ParseIPCNamespace(c.IPC) + if err != nil { + return err + } + } + if c.UTS != "" { + s.UtsNS, err = specgen.ParseNamespace(c.UTS) + if err != nil { + return err + } + } + if c.CgroupNS != "" { + s.CgroupNS, err = specgen.ParseNamespace(c.CgroupNS) + if err != nil { + return err + } + } + userns := os.Getenv("PODMAN_USERNS") + if c.UserNS != "" { + userns = c.UserNS + } + // userns must be treated differently + if userns != "" { + s.UserNS, err = specgen.ParseUserNamespace(userns) + if err != nil { + return err + } + } + if c.Net != nil { + s.NetNS = c.Net.Network + } + return nil +} + +func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions, args []string) error { + var ( + err error + ) + // validate flags as needed + if err := validate(c); err != nil { + return err + } + s.User = c.User + var inputCommand []string + if !c.IsInfra { + if len(args) > 1 { + inputCommand = args[1:] + } + } + + if len(c.HealthCmd) > 0 { + if c.NoHealthCheck { + return errors.New("Cannot specify both --no-healthcheck and --health-cmd") + } + s.HealthConfig, err = makeHealthCheckFromCli(c.HealthCmd, c.HealthInterval, c.HealthRetries, c.HealthTimeout, c.HealthStartPeriod) + if err != nil { + return err + } + } else if c.NoHealthCheck { + s.HealthConfig = &manifest.Schema2HealthConfig{ + Test: []string{"NONE"}, + } + } + if err := setNamespaces(s, c); err != nil { + return err + } + + if s.IDMappings == nil { + userNS := namespaces.UsernsMode(s.UserNS.NSMode) + tempIDMap, err := util.ParseIDMapping(namespaces.UsernsMode(c.UserNS), []string{}, []string{}, "", "") + if err != nil { + return err + } + s.IDMappings, err = util.ParseIDMapping(userNS, c.UIDMap, c.GIDMap, c.SubUIDName, c.SubGIDName) + if err != nil { + return err + } + if len(s.IDMappings.GIDMap) == 0 { + s.IDMappings.AutoUserNsOpts.AdditionalGIDMappings = tempIDMap.AutoUserNsOpts.AdditionalGIDMappings + if s.UserNS.NSMode == specgen.NamespaceMode("auto") { + s.IDMappings.AutoUserNs = true + } + } + if len(s.IDMappings.UIDMap) == 0 { + s.IDMappings.AutoUserNsOpts.AdditionalUIDMappings = tempIDMap.AutoUserNsOpts.AdditionalUIDMappings + if s.UserNS.NSMode == specgen.NamespaceMode("auto") { + s.IDMappings.AutoUserNs = true + } + } + if tempIDMap.AutoUserNsOpts.Size != 0 { + s.IDMappings.AutoUserNsOpts.Size = tempIDMap.AutoUserNsOpts.Size + } + // If some mappings are specified, assume a private user namespace + if userNS.IsDefaultValue() && (!s.IDMappings.HostUIDMapping || !s.IDMappings.HostGIDMapping) { + s.UserNS.NSMode = specgen.Private + } else { + s.UserNS.NSMode = specgen.NamespaceMode(userNS) + } + } + + if !s.Terminal { + s.Terminal = c.TTY + } + + if err := verifyExpose(c.Expose); err != nil { + return err + } + // We are not handling the Expose flag yet. 
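// Editor's sketch (hypothetical values, not upstream code): verifyExpose,
// added in ports.go above, accepts the same forms as --expose, a single
// port or a range, each with an optional protocol suffix:
//
//	err := verifyExpose([]string{"80", "8080-8090/udp"}) // nil
//	err = verifyExpose([]string{"not-a-port"})           // wrapped "invalid range format" error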
+ // s.PortsExpose = c.Expose + if c.Net != nil { + s.PortMappings = c.Net.PublishPorts + } + if !s.PublishExposedPorts { + s.PublishExposedPorts = c.PublishAll + } + + if len(s.Pod) == 0 { + s.Pod = c.Pod + } + + if len(c.PodIDFile) > 0 { + if len(s.Pod) > 0 { + return errors.New("Cannot specify both --pod and --pod-id-file") + } + podID, err := ReadPodIDFile(c.PodIDFile) + if err != nil { + return err + } + s.Pod = podID + } + + expose, err := CreateExpose(c.Expose) + if err != nil { + return err + } + + if len(s.Expose) == 0 { + s.Expose = expose + } + + if sig := c.StopSignal; len(sig) > 0 { + stopSignal, err := util.ParseSignal(sig) + if err != nil { + return err + } + s.StopSignal = &stopSignal + } + + // ENVIRONMENT VARIABLES + // + // Precedence order (higher index wins): + // 1) containers.conf (EnvHost, EnvHTTP, Env) 2) image data, 3) User EnvHost/EnvHTTP, 4) env-file, 5) env + // containers.conf handled and image data handled on the server side + // user specified EnvHost and EnvHTTP handled on Server Side relative to Server + // env-file and env handled on client side + var env map[string]string + + // First transform the os env into a map. We need it for the labels later in + // any case. + osEnv, err := envLib.ParseSlice(os.Environ()) + if err != nil { + return errors.Wrap(err, "error parsing host environment variables") + } + + if !s.EnvHost { + s.EnvHost = c.EnvHost + } + + if !s.HTTPProxy { + s.HTTPProxy = c.HTTPProxy + } + + // env-file overrides any previous variables + for _, f := range c.EnvFile { + fileEnv, err := envLib.ParseFile(f) + if err != nil { + return err + } + // File env is overridden by env. + env = envLib.Join(env, fileEnv) + } + + parsedEnv, err := envLib.ParseSlice(c.Env) + if err != nil { + return err + } + + if len(s.Env) == 0 { + s.Env = envLib.Join(env, parsedEnv) + } + + // LABEL VARIABLES + labels, err := parse.GetAllLabels(c.LabelFile, c.Label) + if err != nil { + return errors.Wrapf(err, "unable to process labels") + } + + if systemdUnit, exists := osEnv[systemdDefine.EnvVariable]; exists { + labels[systemdDefine.EnvVariable] = systemdUnit + } + + if len(s.Labels) == 0 { + s.Labels = labels + } + + // ANNOTATIONS + annotations := make(map[string]string) + + // First, add our default annotations + annotations[ann.TTY] = "false" + if c.TTY { + annotations[ann.TTY] = "true" + } + + // Last, add user annotations + for _, annotation := range c.Annotation { + splitAnnotation := strings.SplitN(annotation, "=", 2) + if len(splitAnnotation) < 2 { + return errors.Errorf("Annotations must be formatted KEY=VALUE") + } + annotations[splitAnnotation[0]] = splitAnnotation[1] + } + if len(s.Annotations) == 0 { + s.Annotations = annotations + } + + if len(c.StorageOpts) > 0 { + opts := make(map[string]string, len(c.StorageOpts)) + for _, opt := range c.StorageOpts { + split := strings.SplitN(opt, "=", 2) + if len(split) != 2 { + return errors.Errorf("storage-opt must be formatted KEY=VALUE") + } + opts[split[0]] = split[1] + } + s.StorageOpts = opts + } + if len(s.WorkDir) == 0 { + s.WorkDir = c.Workdir + } + if c.Entrypoint != nil { + entrypoint := []string{} + // Check if entrypoint specified is json + if err := json.Unmarshal([]byte(*c.Entrypoint), &entrypoint); err != nil { + entrypoint = append(entrypoint, *c.Entrypoint) + } + s.Entrypoint = entrypoint + } + + // Include the command used to create the container. 
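// Editor's note: the guard below fills s.ContainerCreateCommand with
// os.Args only when the spec does not already carry one (for example a
// spec inherited by a cloned container), so inspect output and tools
// such as `podman generate systemd --new` can reproduce the original
// command line.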
+ + if len(s.ContainerCreateCommand) == 0 { + s.ContainerCreateCommand = os.Args + } + + if len(inputCommand) > 0 { + s.Command = inputCommand + } + + // SHM Size + if c.ShmSize != "" { + var m opts.MemBytes + if err := m.Set(c.ShmSize); err != nil { + return errors.Wrapf(err, "unable to translate --shm-size") + } + val := m.Value() + s.ShmSize = &val + } + + if c.Net != nil { + s.Networks = c.Net.Networks + } + + if c.Net != nil { + s.HostAdd = c.Net.AddHosts + s.UseImageResolvConf = c.Net.UseImageResolvConf + s.DNSServers = c.Net.DNSServers + s.DNSSearch = c.Net.DNSSearch + s.DNSOptions = c.Net.DNSOptions + s.NetworkOptions = c.Net.NetworkOptions + s.UseImageHosts = c.Net.NoHosts + } + if len(s.HostUsers) == 0 || len(c.HostUsers) != 0 { + s.HostUsers = c.HostUsers + } + if len(s.ImageVolumeMode) == 0 || len(c.ImageVolume) != 0 { + s.ImageVolumeMode = c.ImageVolume + } + if s.ImageVolumeMode == "bind" { + s.ImageVolumeMode = "anonymous" + } + + if len(s.Systemd) == 0 || len(c.Systemd) != 0 { + s.Systemd = strings.ToLower(c.Systemd) + } + if len(s.SdNotifyMode) == 0 || len(c.SdNotifyMode) != 0 { + s.SdNotifyMode = c.SdNotifyMode + } + if s.ResourceLimits == nil { + s.ResourceLimits = &specs.LinuxResources{} + } + + if s.ResourceLimits.Memory == nil || (len(c.Memory) != 0 || len(c.MemoryReservation) != 0 || len(c.MemorySwap) != 0 || c.MemorySwappiness != 0) { + s.ResourceLimits.Memory, err = getMemoryLimits(c) + if err != nil { + return err + } + } + if s.ResourceLimits.BlockIO == nil || (len(c.BlkIOWeight) != 0 || len(c.BlkIOWeightDevice) != 0) { + s.ResourceLimits.BlockIO, err = getIOLimits(s, c) + if err != nil { + return err + } + } + if c.PIDsLimit != nil { + pids := specs.LinuxPids{ + Limit: *c.PIDsLimit, + } + + s.ResourceLimits.Pids = &pids + } + + if s.ResourceLimits.CPU == nil || (c.CPUPeriod != 0 || c.CPUQuota != 0 || c.CPURTPeriod != 0 || c.CPURTRuntime != 0 || c.CPUS != 0 || len(c.CPUSetCPUs) != 0 || len(c.CPUSetMems) != 0 || c.CPUShares != 0) { + s.ResourceLimits.CPU = getCPULimits(c) + } + + unifieds := make(map[string]string) + for _, unified := range c.CgroupConf { + splitUnified := strings.SplitN(unified, "=", 2) + if len(splitUnified) < 2 { + return errors.Errorf("--cgroup-conf must be formatted KEY=VALUE") + } + unifieds[splitUnified[0]] = splitUnified[1] + } + if len(unifieds) > 0 { + s.ResourceLimits.Unified = unifieds + } + + if s.ResourceLimits.CPU == nil && s.ResourceLimits.Pids == nil && s.ResourceLimits.BlockIO == nil && s.ResourceLimits.Memory == nil && s.ResourceLimits.Unified == nil { + s.ResourceLimits = nil + } + + if s.LogConfiguration == nil { + s.LogConfiguration = &specgen.LogConfig{} + } + + if ld := c.LogDriver; len(ld) > 0 { + s.LogConfiguration.Driver = ld + } + if len(s.CgroupParent) == 0 || len(c.CgroupParent) != 0 { + s.CgroupParent = c.CgroupParent + } + if len(s.CgroupsMode) == 0 { + s.CgroupsMode = c.CgroupsMode + } + if s.CgroupsMode == "" { + rtc, err := config.Default() + if err != nil { + return err + } + + s.CgroupsMode = rtc.Cgroups() + } + + if len(s.Groups) == 0 || len(c.GroupAdd) != 0 { + s.Groups = c.GroupAdd + } + + if len(s.Hostname) == 0 || len(c.Hostname) != 0 { + s.Hostname = c.Hostname + } + sysctl := map[string]string{} + if ctl := c.Sysctl; len(ctl) > 0 { + sysctl, err = util.ValidateSysctls(ctl) + if err != nil { + return err + } + } + if len(s.Sysctl) == 0 || len(c.Sysctl) != 0 { + s.Sysctl = sysctl + } + + if len(s.CapAdd) == 0 || len(c.CapAdd) != 0 { + s.CapAdd = c.CapAdd + } + if len(s.CapDrop) == 0 || len(c.CapDrop) != 0 
{ + s.CapDrop = c.CapDrop + } + if !s.Privileged { + s.Privileged = c.Privileged + } + if !s.ReadOnlyFilesystem { + s.ReadOnlyFilesystem = c.ReadOnly + } + if len(s.ConmonPidFile) == 0 || len(c.ConmonPIDFile) != 0 { + s.ConmonPidFile = c.ConmonPIDFile + } + + if len(s.DependencyContainers) == 0 || len(c.Requires) != 0 { + s.DependencyContainers = c.Requires + } + + // TODO + // outside of specgen and oci though + // defaults to true, check spec/storage + // s.readonly = c.ReadOnlyTmpFS + // TODO convert to map? + // check if key=value and convert + sysmap := make(map[string]string) + for _, ctl := range c.Sysctl { + splitCtl := strings.SplitN(ctl, "=", 2) + if len(splitCtl) < 2 { + return errors.Errorf("invalid sysctl value %q", ctl) + } + sysmap[splitCtl[0]] = splitCtl[1] + } + if len(s.Sysctl) == 0 || len(c.Sysctl) != 0 { + s.Sysctl = sysmap + } + + if c.CIDFile != "" { + s.Annotations[define.InspectAnnotationCIDFile] = c.CIDFile + } + + for _, opt := range c.SecurityOpt { + if opt == "no-new-privileges" { + s.ContainerSecurityConfig.NoNewPrivileges = true + } else { + con := strings.SplitN(opt, "=", 2) + if len(con) != 2 { + return fmt.Errorf("invalid --security-opt 1: %q", opt) + } + switch con[0] { + case "apparmor": + s.ContainerSecurityConfig.ApparmorProfile = con[1] + s.Annotations[define.InspectAnnotationApparmor] = con[1] + case "label": + // TODO selinux opts and label opts are the same thing + s.ContainerSecurityConfig.SelinuxOpts = append(s.ContainerSecurityConfig.SelinuxOpts, con[1]) + s.Annotations[define.InspectAnnotationLabel] = strings.Join(s.ContainerSecurityConfig.SelinuxOpts, ",label=") + case "mask": + s.ContainerSecurityConfig.Mask = append(s.ContainerSecurityConfig.Mask, strings.Split(con[1], ":")...) + case "proc-opts": + s.ProcOpts = strings.Split(con[1], ",") + case "seccomp": + s.SeccompProfilePath = con[1] + s.Annotations[define.InspectAnnotationSeccomp] = con[1] + // this option is for docker compatibility, it is the same as unmask=ALL + case "systempaths": + if con[1] == "unconfined" { + s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, []string{"ALL"}...) + } else { + return fmt.Errorf("invalid systempaths option %q, only `unconfined` is supported", con[1]) + } + case "unmask": + s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, con[1:]...) + default: + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + } + } + + if len(s.SeccompPolicy) == 0 || len(c.SeccompPolicy) != 0 { + s.SeccompPolicy = c.SeccompPolicy + } + + if len(s.VolumesFrom) == 0 || len(c.VolumesFrom) != 0 { + s.VolumesFrom = c.VolumesFrom + } + + // Only add read-only tmpfs mounts in case that we are read-only and the + // read-only tmpfs flag has been set. 
+ mounts, volumes, overlayVolumes, imageVolumes, err := parseVolumes(c.Volume, c.Mount, c.TmpFS, c.ReadOnlyTmpFS && c.ReadOnly) + if err != nil { + return err + } + if len(s.Mounts) == 0 || len(c.Mount) != 0 { + s.Mounts = mounts + } + if len(s.Volumes) == 0 || len(c.Volume) != 0 { + s.Volumes = volumes + } + // TODO make sure these work in clone + if len(s.OverlayVolumes) == 0 { + s.OverlayVolumes = overlayVolumes + } + if len(s.ImageVolumes) == 0 { + s.ImageVolumes = imageVolumes + } + + for _, dev := range c.Devices { + s.Devices = append(s.Devices, specs.LinuxDevice{Path: dev}) + } + + for _, rule := range c.DeviceCgroupRule { + dev, err := parseLinuxResourcesDeviceAccess(rule) + if err != nil { + return err + } + s.DeviceCgroupRule = append(s.DeviceCgroupRule, dev) + } + + if !s.Init { + s.Init = c.Init + } + if len(s.InitPath) == 0 || len(c.InitPath) != 0 { + s.InitPath = c.InitPath + } + if !s.Stdin { + s.Stdin = c.Interactive + } + // quiet + // DeviceCgroupRules: c.StringSlice("device-cgroup-rule"), + + // Rlimits/Ulimits + for _, u := range c.Ulimit { + if u == "host" { + s.Rlimits = nil + break + } + ul, err := units.ParseUlimit(u) + if err != nil { + return errors.Wrapf(err, "ulimit option %q requires name=SOFT:HARD, failed to be parsed", u) + } + rl := specs.POSIXRlimit{ + Type: ul.Name, + Hard: uint64(ul.Hard), + Soft: uint64(ul.Soft), + } + s.Rlimits = append(s.Rlimits, rl) + } + + logOpts := make(map[string]string) + for _, o := range c.LogOptions { + split := strings.SplitN(o, "=", 2) + if len(split) < 2 { + return errors.Errorf("invalid log option %q", o) + } + switch strings.ToLower(split[0]) { + case "driver": + s.LogConfiguration.Driver = split[1] + case "path": + s.LogConfiguration.Path = split[1] + case "max-size": + logSize, err := units.FromHumanSize(split[1]) + if err != nil { + return err + } + s.LogConfiguration.Size = logSize + default: + logOpts[split[0]] = split[1] + } + } + if len(s.LogConfiguration.Options) == 0 || len(c.LogOptions) != 0 { + s.LogConfiguration.Options = logOpts + } + if len(s.Name) == 0 || len(c.Name) != 0 { + s.Name = c.Name + } + if s.PreserveFDs == 0 || c.PreserveFDs != 0 { + s.PreserveFDs = c.PreserveFDs + } + + if s.OOMScoreAdj == nil || c.OOMScoreAdj != nil { + s.OOMScoreAdj = c.OOMScoreAdj + } + if c.Restart != "" { + splitRestart := strings.Split(c.Restart, ":") + switch len(splitRestart) { + case 1: + // No retries specified + case 2: + if strings.ToLower(splitRestart[0]) != "on-failure" { + return errors.Errorf("restart policy retries can only be specified with on-failure restart policy") + } + retries, err := strconv.Atoi(splitRestart[1]) + if err != nil { + return errors.Wrapf(err, "error parsing restart policy retry count") + } + if retries < 0 { + return errors.Errorf("must specify restart policy retry count as a number greater than 0") + } + var retriesUint = uint(retries) + s.RestartRetries = &retriesUint + default: + return errors.Errorf("invalid restart policy: may specify retries at most once") + } + s.RestartPolicy = splitRestart[0] + } + + if len(s.Secrets) == 0 || len(c.Secrets) != 0 { + s.Secrets, s.EnvSecrets, err = parseSecrets(c.Secrets) + if err != nil { + return err + } + } + + if c.Personality != "" { + s.Personality = &specs.LinuxPersonality{} + s.Personality.Domain = specs.LinuxPersonalityDomain(c.Personality) + } + + if !s.Remove { + s.Remove = c.Rm + } + if s.StopTimeout == nil || c.StopTimeout != 0 { + s.StopTimeout = &c.StopTimeout + } + if s.Timeout == 0 || c.Timeout != 0 { + s.Timeout = c.Timeout + } + 
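+ // Note: the pattern used throughout this function (copy a CLI value only
+ // when the spec field is still unset or the flag was explicitly given)
+ // lets a pre-populated SpecGenerator, e.g. one reused for a container
+ // clone, keep its existing values.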
if len(s.Timezone) == 0 || len(c.Timezone) != 0 { + s.Timezone = c.Timezone + } + if len(s.Umask) == 0 || len(c.Umask) != 0 { + s.Umask = c.Umask + } + if len(s.PidFile) == 0 || len(c.PidFile) != 0 { + s.PidFile = c.PidFile + } + if !s.Volatile { + s.Volatile = c.Rm + } + if len(s.UnsetEnv) == 0 || len(c.UnsetEnv) != 0 { + s.UnsetEnv = c.UnsetEnv + } + if !s.UnsetEnvAll { + s.UnsetEnvAll = c.UnsetEnvAll + } + if len(s.ChrootDirs) == 0 || len(c.ChrootDirs) != 0 { + s.ChrootDirs = c.ChrootDirs + } + + // Initcontainers + if len(s.InitContainerType) == 0 || len(c.InitContainerType) != 0 { + s.InitContainerType = c.InitContainerType + } + + t := true + if s.Passwd == nil { + s.Passwd = &t + } + + if len(s.PasswdEntry) == 0 || len(c.PasswdEntry) != 0 { + s.PasswdEntry = c.PasswdEntry + } + + return nil +} + +func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, startPeriod string) (*manifest.Schema2HealthConfig, error) { + cmdArr := []string{} + isArr := true + err := json.Unmarshal([]byte(inCmd), &cmdArr) // array unmarshalling + if err != nil { + cmdArr = strings.SplitN(inCmd, " ", 2) // default for compat + isArr = false + } + // Every healthcheck requires a command + if len(cmdArr) == 0 { + return nil, errors.New("Must define a healthcheck command for all healthchecks") + } + + var concat string + if cmdArr[0] == "CMD" || cmdArr[0] == "none" { // this is for compat, we are already split properly for most compat cases + cmdArr = strings.Fields(inCmd) + } else if cmdArr[0] != "CMD-SHELL" { // this is for podman side of things, won't contain the keywords + if isArr && len(cmdArr) > 1 { // an array of consecutive commands + cmdArr = append([]string{"CMD"}, cmdArr...) + } else { // one singular command + if len(cmdArr) == 1 { + concat = cmdArr[0] + } else { + concat = strings.Join(cmdArr[0:], " ") + } + cmdArr = append([]string{"CMD-SHELL"}, concat) + } + } + + if cmdArr[0] == "none" { // if specified to remove healtcheck + cmdArr = []string{"NONE"} + } + + // healthcheck is by default an array, so we simply pass the user input + hc := manifest.Schema2HealthConfig{ + Test: cmdArr, + } + + if interval == "disable" { + interval = "0" + } + intervalDuration, err := time.ParseDuration(interval) + if err != nil { + return nil, errors.Wrapf(err, "invalid healthcheck-interval") + } + + hc.Interval = intervalDuration + + if retries < 1 { + return nil, errors.New("healthcheck-retries must be greater than 0") + } + hc.Retries = int(retries) + timeoutDuration, err := time.ParseDuration(timeout) + if err != nil { + return nil, errors.Wrapf(err, "invalid healthcheck-timeout") + } + if timeoutDuration < time.Duration(1) { + return nil, errors.New("healthcheck-timeout must be at least 1 second") + } + hc.Timeout = timeoutDuration + + startPeriodDuration, err := time.ParseDuration(startPeriod) + if err != nil { + return nil, errors.Wrapf(err, "invalid healthcheck-start-period") + } + if startPeriodDuration < time.Duration(0) { + return nil, errors.New("healthcheck-start-period must be 0 seconds or greater") + } + hc.StartPeriod = startPeriodDuration + + return &hc, nil +} + +func parseWeightDevices(weightDevs []string) (map[string]specs.LinuxWeightDevice, error) { + wd := make(map[string]specs.LinuxWeightDevice) + for _, val := range weightDevs { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + weight, err := 
strconv.ParseUint(split[1], 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + if weight > 0 && (weight < 10 || weight > 1000) { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + w := uint16(weight) + wd[split[0]] = specs.LinuxWeightDevice{ + Weight: &w, + LeafWeight: nil, + } + } + return wd, nil +} + +func parseThrottleBPSDevices(bpsDevices []string) (map[string]specs.LinuxThrottleDevice, error) { + td := make(map[string]specs.LinuxThrottleDevice) + for _, val := range bpsDevices { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := units.RAMInBytes(split[1]) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + td[split[0]] = specs.LinuxThrottleDevice{Rate: uint64(rate)} + } + return td, nil +} + +func parseThrottleIOPsDevices(iopsDevices []string) (map[string]specs.LinuxThrottleDevice, error) { + td := make(map[string]specs.LinuxThrottleDevice) + for _, val := range iopsDevices { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := strconv.ParseUint(split[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. 
Number must be a positive integer", val) + } + td[split[0]] = specs.LinuxThrottleDevice{Rate: rate} + } + return td, nil +} + +func parseSecrets(secrets []string) ([]specgen.Secret, map[string]string, error) { + secretParseError := errors.New("parsing secret") + var mount []specgen.Secret + envs := make(map[string]string) + for _, val := range secrets { + // mount only tells if user has set an option that can only be used with mount secret type + mountOnly := false + source := "" + secretType := "" + target := "" + var uid, gid uint32 + // default mode 444 octal = 292 decimal + var mode uint32 = 292 + split := strings.Split(val, ",") + + // --secret mysecret + if len(split) == 1 { + mountSecret := specgen.Secret{ + Source: val, + Target: target, + UID: uid, + GID: gid, + Mode: mode, + } + mount = append(mount, mountSecret) + continue + } + // --secret mysecret,opt=opt + if !strings.Contains(split[0], "=") { + source = split[0] + split = split[1:] + } + + for _, val := range split { + kv := strings.SplitN(val, "=", 2) + if len(kv) < 2 { + return nil, nil, errors.Wrapf(secretParseError, "option %s must be in form option=value", val) + } + switch kv[0] { + case "source": + source = kv[1] + case "type": + if secretType != "" { + return nil, nil, errors.Wrap(secretParseError, "cannot set more than one secret type") + } + if kv[1] != "mount" && kv[1] != "env" { + return nil, nil, errors.Wrapf(secretParseError, "type %s is invalid", kv[1]) + } + secretType = kv[1] + case "target": + target = kv[1] + case "mode": + mountOnly = true + mode64, err := strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return nil, nil, errors.Wrapf(secretParseError, "mode %s invalid", kv[1]) + } + mode = uint32(mode64) + case "uid", "UID": + mountOnly = true + uid64, err := strconv.ParseUint(kv[1], 10, 32) + if err != nil { + return nil, nil, errors.Wrapf(secretParseError, "UID %s invalid", kv[1]) + } + uid = uint32(uid64) + case "gid", "GID": + mountOnly = true + gid64, err := strconv.ParseUint(kv[1], 10, 32) + if err != nil { + return nil, nil, errors.Wrapf(secretParseError, "GID %s invalid", kv[1]) + } + gid = uint32(gid64) + + default: + return nil, nil, errors.Wrapf(secretParseError, "option %s invalid", val) + } + } + + if secretType == "" { + secretType = "mount" + } + if source == "" { + return nil, nil, errors.Wrapf(secretParseError, "no source found %s", val) + } + if secretType == "mount" { + mountSecret := specgen.Secret{ + Source: source, + Target: target, + UID: uid, + GID: gid, + Mode: mode, + } + mount = append(mount, mountSecret) + } + if secretType == "env" { + if mountOnly { + return nil, nil, errors.Wrap(secretParseError, "UID, GID, Mode options cannot be set with secret type env") + } + if target == "" { + target = source + } + envs[target] = source + } + } + return mount, envs, nil +} + +var cgroupDeviceType = map[string]bool{ + "a": true, // all + "b": true, // block device + "c": true, // character device +} + +var cgroupDeviceAccess = map[string]bool{ + "r": true, // read + "w": true, // write + "m": true, // mknod +} + +// parseLinuxResourcesDeviceAccess parses the raw string passed with the --device-access-add flag +func parseLinuxResourcesDeviceAccess(device string) (specs.LinuxDeviceCgroup, error) { + var devType, access string + var major, minor *int64 + + value := strings.Split(device, " ") + if len(value) != 3 { + return specs.LinuxDeviceCgroup{}, fmt.Errorf("invalid device cgroup rule requires type, major:Minor, and access rules: %q", device) + } + + devType = value[0] + if 
!cgroupDeviceType[devType] { + return specs.LinuxDeviceCgroup{}, fmt.Errorf("invalid device type in device-access-add: %s", devType) + } + + number := strings.SplitN(value[1], ":", 2) + i, err := strconv.ParseInt(number[0], 10, 64) + if err != nil { + return specs.LinuxDeviceCgroup{}, err + } + major = &i + if len(number) == 2 && number[1] != "*" { + i, err := strconv.ParseInt(number[1], 10, 64) + if err != nil { + return specs.LinuxDeviceCgroup{}, err + } + minor = &i + } + access = value[2] + for _, c := range strings.Split(access, "") { + if !cgroupDeviceAccess[c] { + return specs.LinuxDeviceCgroup{}, fmt.Errorf("invalid device access in device-access-add: %s", c) + } + } + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: devType, + Major: major, + Minor: minor, + Access: access, + }, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go new file mode 100644 index 00000000000..fa2e904570a --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go @@ -0,0 +1,315 @@ +package specgenutil + +import ( + "io/ioutil" + "net" + "os" + "strconv" + "strings" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + storageTypes "github.com/containers/storage/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ReadPodIDFile reads the specified file and returns its content (i.e., first +// line). +func ReadPodIDFile(path string) (string, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return "", errors.Wrap(err, "error reading pod ID file") + } + return strings.Split(string(content), "\n")[0], nil +} + +// ReadPodIDFiles reads the specified files and returns their content (i.e., +// first line). +func ReadPodIDFiles(files []string) ([]string, error) { + ids := []string{} + for _, file := range files { + id, err := ReadPodIDFile(file) + if err != nil { + return nil, err + } + ids = append(ids, id) + } + return ids, nil +} + +// CreateExpose parses user-provided exposed port definitions and converts them +// into SpecGen format. +// TODO: The SpecGen format should really handle ranges more sanely - we could +// be massively inflating what is sent over the wire with a large range. +func CreateExpose(expose []string) (map[uint16]string, error) { + toReturn := make(map[uint16]string) + + for _, e := range expose { + // Check for protocol + proto := "tcp" + splitProto := strings.Split(e, "/") + if len(splitProto) > 2 { + return nil, errors.Errorf("invalid expose format - protocol can only be specified once") + } else if len(splitProto) == 2 { + proto = splitProto[1] + } + + // Check for a range + start, len, err := parseAndValidateRange(splitProto[0]) + if err != nil { + return nil, err + } + + var index uint16 + for index = 0; index < len; index++ { + portNum := start + index + protocols, ok := toReturn[portNum] + if !ok { + toReturn[portNum] = proto + } else { + newProto := strings.Join(append(strings.Split(protocols, ","), strings.Split(proto, ",")...), ",") + toReturn[portNum] = newProto + } + } + } + + return toReturn, nil +} + +// CreatePortBindings iterates ports mappings into SpecGen format. 
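+// For example, "127.0.0.1:8080-8081:80-81/udp" maps container ports 80-81/udp
+// to host ports 8080-8081 on 127.0.0.1; host IP and host port may be omitted.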
+func CreatePortBindings(ports []string) ([]types.PortMapping, error) { + // --publish is formatted as follows: + // [[hostip:]hostport[-endPort]:]containerport[-endPort][/protocol] + toReturn := make([]types.PortMapping, 0, len(ports)) + + for _, p := range ports { + var ( + ctrPort string + proto, hostIP, hostPort *string + ) + + splitProto := strings.Split(p, "/") + switch len(splitProto) { + case 1: + // No protocol was provided + case 2: + proto = &(splitProto[1]) + default: + return nil, errors.Errorf("invalid port format - protocol can only be specified once") + } + + remainder := splitProto[0] + haveV6 := false + + // Check for an IPv6 address in brackets + splitV6 := strings.Split(remainder, "]") + switch len(splitV6) { + case 1: + // Do nothing, proceed as before + case 2: + // We potentially have an IPv6 address + haveV6 = true + if !strings.HasPrefix(splitV6[0], "[") { + return nil, errors.Errorf("invalid port format - IPv6 addresses must be enclosed by []") + } + if !strings.HasPrefix(splitV6[1], ":") { + return nil, errors.Errorf("invalid port format - IPv6 address must be followed by a colon (':')") + } + ipNoPrefix := strings.TrimPrefix(splitV6[0], "[") + hostIP = &ipNoPrefix + remainder = strings.TrimPrefix(splitV6[1], ":") + default: + return nil, errors.Errorf("invalid port format - at most one IPv6 address can be specified in a --publish") + } + + splitPort := strings.Split(remainder, ":") + switch len(splitPort) { + case 1: + if haveV6 { + return nil, errors.Errorf("invalid port format - must provide host and destination port if specifying an IP") + } + ctrPort = splitPort[0] + case 2: + hostPort = &(splitPort[0]) + ctrPort = splitPort[1] + case 3: + if haveV6 { + return nil, errors.Errorf("invalid port format - when v6 address specified, must be [ipv6]:hostPort:ctrPort") + } + hostIP = &(splitPort[0]) + hostPort = &(splitPort[1]) + ctrPort = splitPort[2] + default: + return nil, errors.Errorf("invalid port format - format is [[hostIP:]hostPort:]containerPort") + } + + newPort, err := parseSplitPort(hostIP, hostPort, ctrPort, proto) + if err != nil { + return nil, err + } + + toReturn = append(toReturn, newPort) + } + + return toReturn, nil +} + +// parseSplitPort parses individual components of the --publish flag to produce +// a single port mapping in SpecGen format. +func parseSplitPort(hostIP, hostPort *string, ctrPort string, protocol *string) (types.PortMapping, error) { + newPort := types.PortMapping{} + if ctrPort == "" { + return newPort, errors.Errorf("must provide a non-empty container port to publish") + } + ctrStart, ctrLen, err := parseAndValidateRange(ctrPort) + if err != nil { + return newPort, errors.Wrapf(err, "error parsing container port") + } + newPort.ContainerPort = ctrStart + newPort.Range = ctrLen + + if protocol != nil { + if *protocol == "" { + return newPort, errors.Errorf("must provide a non-empty protocol to publish") + } + newPort.Protocol = *protocol + } + if hostIP != nil { + if *hostIP == "" { + return newPort, errors.Errorf("must provide a non-empty container host IP to publish") + } else if *hostIP != "0.0.0.0" { + // If hostIP is 0.0.0.0, leave it unset - CNI treats + // 0.0.0.0 and empty differently, Docker does not. + testIP := net.ParseIP(*hostIP) + if testIP == nil { + return newPort, errors.Errorf("cannot parse %q as an IP address", *hostIP) + } + newPort.HostIP = testIP.String() + } + } + if hostPort != nil { + if *hostPort == "" { + // Set 0 as a placeholder. 
The server side of Specgen + // will find a random, open, unused port to use. + newPort.HostPort = 0 + } else { + hostStart, hostLen, err := parseAndValidateRange(*hostPort) + if err != nil { + return newPort, errors.Wrapf(err, "error parsing host port") + } + if hostLen != ctrLen { + return newPort, errors.Errorf("host and container port ranges have different lengths: %d vs %d", hostLen, ctrLen) + } + newPort.HostPort = hostStart + } + } + + hport := newPort.HostPort + logrus.Debugf("Adding port mapping from %d to %d length %d protocol %q", hport, newPort.ContainerPort, newPort.Range, newPort.Protocol) + + return newPort, nil +} + +// Parse and validate a port range. +// Returns start port, length of range, error. +func parseAndValidateRange(portRange string) (uint16, uint16, error) { + splitRange := strings.Split(portRange, "-") + if len(splitRange) > 2 { + return 0, 0, errors.Errorf("invalid port format - port ranges are formatted as startPort-stopPort") + } + + if splitRange[0] == "" { + return 0, 0, errors.Errorf("port numbers cannot be negative") + } + + startPort, err := parseAndValidatePort(splitRange[0]) + if err != nil { + return 0, 0, err + } + + var rangeLen uint16 = 1 + if len(splitRange) == 2 { + if splitRange[1] == "" { + return 0, 0, errors.Errorf("must provide ending number for port range") + } + endPort, err := parseAndValidatePort(splitRange[1]) + if err != nil { + return 0, 0, err + } + if endPort <= startPort { + return 0, 0, errors.Errorf("the end port of a range must be higher than the start port - %d is not higher than %d", endPort, startPort) + } + // Our range is the total number of ports + // involved, so we need to add 1 (8080:8081 is + // 2 ports, for example, not 1) + rangeLen = endPort - startPort + 1 + } + + return startPort, rangeLen, nil +} + +// Turn a single string into a valid U16 port. +func parseAndValidatePort(port string) (uint16, error) { + num, err := strconv.Atoi(port) + if err != nil { + return 0, errors.Wrapf(err, "invalid port number") + } + if num < 1 || num > 65535 { + return 0, errors.Errorf("port numbers must be between 1 and 65535 (inclusive), got %d", num) + } + return uint16(num), nil +} + +func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *config.Config, syslog, rm, exec bool) ([]string, error) { + // We need a cleanup process for containers in the current model. + // But we can't assume that the caller is Podman - it could be another + // user of the API. + // As such, provide a way to specify a path to Podman, so we can + // still invoke a cleanup process. + + podmanPath, err := os.Executable() + if err != nil { + return nil, err + } + + command := []string{podmanPath, + "--root", storageConfig.GraphRoot, + "--runroot", storageConfig.RunRoot, + "--log-level", logrus.GetLevel().String(), + "--cgroup-manager", config.Engine.CgroupManager, + "--tmpdir", config.Engine.TmpDir, + "--network-config-dir", config.Network.NetworkConfigDir, + "--network-backend", config.Network.NetworkBackend, + "--volumepath", config.Engine.VolumePath, + } + if config.Engine.OCIRuntime != "" { + command = append(command, []string{"--runtime", config.Engine.OCIRuntime}...) + } + if storageConfig.GraphDriverName != "" { + command = append(command, []string{"--storage-driver", storageConfig.GraphDriverName}...) + } + for _, opt := range storageConfig.GraphDriverOptions { + command = append(command, []string{"--storage-opt", opt}...) 
+ } + if config.Engine.EventsLogger != "" { + command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...) + } + + if syslog { + command = append(command, "--syslog") + } + command = append(command, []string{"container", "cleanup"}...) + + if rm { + command = append(command, "--rm") + } + + // This has to be absolutely last, to ensure that the exec session ID + // will be added after it by Libpod. + if exec { + command = append(command, "--exec") + } + + return command, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go new file mode 100644 index 00000000000..50d74538090 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go @@ -0,0 +1,707 @@ +package specgenutil + +import ( + "encoding/csv" + "fmt" + "path" + "strings" + + "github.com/containers/common/pkg/parse" + "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/pkg/util" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +var ( + errDuplicateDest = errors.Errorf("duplicate mount destination") + optionArgError = errors.Errorf("must provide an argument for option") + noDestError = errors.Errorf("must set volume destination") + errInvalidSyntax = errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs|volume>,[src=<host-dir|volume-name>,]target=<ctr-dir>[,options]") +) + +// Parse all volume-related options in the create config into a set of mounts +// and named volumes to add to the container. +// Handles --volumes, --mount, and --tmpfs flags. +// Does not handle image volumes, init, and --volumes-from flags. +// Can also add tmpfs mounts from read-only tmpfs. +// TODO: handle options parsing/processing via containers/storage/pkg/mount +func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bool) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) { + // Get mounts from the --mounts flag. + unifiedMounts, unifiedVolumes, unifiedImageVolumes, err := Mounts(mountFlag) + if err != nil { + return nil, nil, nil, nil, err + } + + // Next --volumes flag. + volumeMounts, volumeVolumes, overlayVolumes, err := specgen.GenVolumeMounts(volumeFlag) + if err != nil { + return nil, nil, nil, nil, err + } + + // Next --tmpfs flag. + tmpfsMounts, err := getTmpfsMounts(tmpfsFlag) + if err != nil { + return nil, nil, nil, nil, err + } + + // Unify mounts from --mount, --volume, --tmpfs. + // Start with --volume. + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest) + } + unifiedMounts[dest] = mount + } + for dest, volume := range volumeVolumes { + if _, ok := unifiedVolumes[dest]; ok { + return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest) + } + unifiedVolumes[dest] = volume + } + // Now --tmpfs + for dest, tmpfs := range tmpfsMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, nil, nil, nil, errors.Wrapf(errDuplicateDest, dest) + } + unifiedMounts[dest] = tmpfs + } + + // If requested, add tmpfs filesystems for read-only containers. 
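+ // (When enabled, /tmp, /var/tmp, and /run become rw tmpfs mounts below,
+ // unless the user already placed a mount or named volume at those paths.)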
+ if addReadOnlyTmpfs { + readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"} + options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"} + for _, dest := range readonlyTmpfs { + if _, ok := unifiedMounts[dest]; ok { + continue + } + if _, ok := unifiedVolumes[dest]; ok { + continue + } + unifiedMounts[dest] = spec.Mount{ + Destination: dest, + Type: define.TypeTmpfs, + Source: "tmpfs", + Options: options, + } + } + } + + // Check for conflicts between named volumes, overlay & image volumes, + // and mounts + allMounts := make(map[string]bool) + testAndSet := func(dest string) error { + if _, ok := allMounts[dest]; ok { + return errors.Wrapf(errDuplicateDest, "conflict at mount destination %v", dest) + } + allMounts[dest] = true + return nil + } + for dest := range unifiedMounts { + if err := testAndSet(dest); err != nil { + return nil, nil, nil, nil, err + } + } + for dest := range unifiedVolumes { + if err := testAndSet(dest); err != nil { + return nil, nil, nil, nil, err + } + } + for dest := range overlayVolumes { + if err := testAndSet(dest); err != nil { + return nil, nil, nil, nil, err + } + } + for dest := range unifiedImageVolumes { + if err := testAndSet(dest); err != nil { + return nil, nil, nil, nil, err + } + } + + // Final step: maps to arrays + finalMounts := make([]spec.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + if mount.Type == define.TypeBind { + absSrc, err := specgen.ConvertWinMountPath(mount.Source) + if err != nil { + return nil, nil, nil, nil, errors.Wrapf(err, "error getting absolute path of %s", mount.Source) + } + mount.Source = absSrc + } + finalMounts = append(finalMounts, mount) + } + finalVolumes := make([]*specgen.NamedVolume, 0, len(unifiedVolumes)) + for _, volume := range unifiedVolumes { + finalVolumes = append(finalVolumes, volume) + } + finalOverlayVolume := make([]*specgen.OverlayVolume, 0) + for _, volume := range overlayVolumes { + finalOverlayVolume = append(finalOverlayVolume, volume) + } + finalImageVolumes := make([]*specgen.ImageVolume, 0, len(unifiedImageVolumes)) + for _, volume := range unifiedImageVolumes { + finalImageVolumes = append(finalImageVolumes, volume) + } + + return finalMounts, finalVolumes, finalOverlayVolume, finalImageVolumes, nil +} + +// findMountType parses the input and extracts the type of the mount type and +// the remaining non-type tokens. +func findMountType(input string) (mountType string, tokens []string, err error) { + // Split by comma, iterate over the slice and look for + // "type=$mountType". Everything else is appended to tokens. + found := false + csvReader := csv.NewReader(strings.NewReader(input)) + records, err := csvReader.ReadAll() + if err != nil { + return "", nil, err + } + if len(records) != 1 { + return "", nil, errInvalidSyntax + } + for _, s := range records[0] { + kv := strings.Split(s, "=") + if found || !(len(kv) == 2 && kv[0] == "type") { + tokens = append(tokens, s) + continue + } + mountType = kv[1] + found = true + } + if !found { + err = errInvalidSyntax + } + return +} + +// Mounts takes user-provided input from the --mount flag and creates OCI +// spec mounts and Libpod named volumes. +// podman run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// podman run --mount type=tmpfs,target=/dev/shm ... +// podman run --mount type=volume,source=test-volume, ... 
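+// podman run --mount type=devpts,target=/dev/pts ...
+// podman run --mount type=image,source=fedora,target=/mnt ...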
+func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.NamedVolume, map[string]*specgen.ImageVolume, error) { + finalMounts := make(map[string]spec.Mount) + finalNamedVolumes := make(map[string]*specgen.NamedVolume) + finalImageVolumes := make(map[string]*specgen.ImageVolume) + + for _, mount := range mountFlag { + // TODO: Docker defaults to "volume" if no mount type is specified. + mountType, tokens, err := findMountType(mount) + if err != nil { + return nil, nil, nil, err + } + switch mountType { + case define.TypeBind: + mount, err := getBindMount(tokens) + if err != nil { + return nil, nil, nil, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + case define.TypeTmpfs: + mount, err := getTmpfsMount(tokens) + if err != nil { + return nil, nil, nil, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + case define.TypeDevpts: + mount, err := getDevptsMount(tokens) + if err != nil { + return nil, nil, nil, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + case "image": + volume, err := getImageVolume(tokens) + if err != nil { + return nil, nil, nil, err + } + if _, ok := finalImageVolumes[volume.Destination]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, volume.Destination) + } + finalImageVolumes[volume.Destination] = volume + case "volume": + volume, err := getNamedVolume(tokens) + if err != nil { + return nil, nil, nil, err + } + if _, ok := finalNamedVolumes[volume.Dest]; ok { + return nil, nil, nil, errors.Wrapf(errDuplicateDest, volume.Dest) + } + finalNamedVolumes[volume.Dest] = volume + default: + return nil, nil, nil, errors.Errorf("invalid filesystem type %q", mountType) + } + } + + return finalMounts, finalNamedVolumes, finalImageVolumes, nil +} + +// Parse a single bind mount entry from the --mount flag. 
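+// Recognized options include ro/rw/readonly, nosuid/suid, nodev/dev,
+// noexec/exec, the propagation flags, bind-propagation, relabel=private|shared,
+// U/chown, idmap, and bind-nonrecursive; "consistency" is accepted and
+// ignored for Docker compatibility.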
+func getBindMount(args []string) (spec.Mount, error) { + newMount := spec.Mount{ + Type: define.TypeBind, + } + + var setSource, setDest, setRORW, setSuid, setDev, setExec, setRelabel, setOwnership bool + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "bind-nonrecursive": + newMount.Options = append(newMount.Options, "bind") + case "readonly", "ro", "rw": + if setRORW { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'readonly', 'ro', or 'rw' options more than once") + } + setRORW = true + // Can be formatted as one of: + // readonly + // readonly=[true|false] + // ro + // ro=[true|false] + // rw + // rw=[true|false] + if kv[0] == "readonly" { + kv[0] = "ro" + } + switch len(kv) { + case 1: + newMount.Options = append(newMount.Options, kv[0]) + case 2: + switch strings.ToLower(kv[1]) { + case "true": + newMount.Options = append(newMount.Options, kv[0]) + case "false": + // Set the opposite only for rw + // ro's opposite is the default + if kv[0] == "rw" { + newMount.Options = append(newMount.Options, "ro") + } + default: + return newMount, errors.Wrapf(optionArgError, "'readonly', 'ro', or 'rw' must be set to true or false, instead received %q", kv[1]) + } + default: + return newMount, errors.Wrapf(optionArgError, "badly formatted option %q", val) + } + case "nosuid", "suid": + if setSuid { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newMount.Options = append(newMount.Options, kv[0]) + case "nodev", "dev": + if setDev { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newMount.Options = append(newMount.Options, kv[0]) + case "noexec", "exec": + if setExec { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true + newMount.Options = append(newMount.Options, kv[0]) + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "unbindable", "runbindable", "Z", "z": + newMount.Options = append(newMount.Options, kv[0]) + case "bind-propagation": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + newMount.Options = append(newMount.Options, kv[1]) + case "src", "source": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + if len(kv[1]) == 0 { + return newMount, errors.Wrapf(optionArgError, "host directory cannot be empty") + } + newMount.Source = kv[1] + setSource = true + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, err + } + newMount.Destination = unixPathClean(kv[1]) + setDest = true + case "relabel": + if setRelabel { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'relabel' option more than once") + } + setRelabel = true + if len(kv) != 2 { + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0]) + } + switch kv[1] { + case "private": + newMount.Options = append(newMount.Options, "Z") + case "shared": + newMount.Options = append(newMount.Options, "z") + default: + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s mount option must be 'private' or 'shared'", kv[0]) + } + case "U", "chown": + if setOwnership { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'U' or 'chown' option more than once") + } + ok, err := 
validChownFlag(val) + if err != nil { + return newMount, err + } + if ok { + newMount.Options = append(newMount.Options, "U") + } + setOwnership = true + case "idmap": + if len(kv) > 1 { + newMount.Options = append(newMount.Options, fmt.Sprintf("idmap=%s", kv[1])) + } else { + newMount.Options = append(newMount.Options, "idmap") + } + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. + continue + default: + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0]) + } + } + + if !setDest { + return newMount, noDestError + } + + if !setSource { + newMount.Source = newMount.Destination + } + + options, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, err + } + newMount.Options = options + return newMount, nil +} + +// Parse a single tmpfs mount entry from the --mount flag +func getTmpfsMount(args []string) (spec.Mount, error) { + newMount := spec.Mount{ + Type: define.TypeTmpfs, + Source: define.TypeTmpfs, + } + + var setDest, setRORW, setSuid, setDev, setExec, setTmpcopyup, setOwnership bool + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "tmpcopyup", "notmpcopyup": + if setTmpcopyup { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'tmpcopyup' and 'notmpcopyup' options more than once") + } + setTmpcopyup = true + newMount.Options = append(newMount.Options, kv[0]) + case "ro", "rw": + if setRORW { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once") + } + setRORW = true + newMount.Options = append(newMount.Options, kv[0]) + case "nosuid", "suid": + if setSuid { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newMount.Options = append(newMount.Options, kv[0]) + case "nodev", "dev": + if setDev { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newMount.Options = append(newMount.Options, kv[0]) + case "noexec", "exec": + if setExec { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true + newMount.Options = append(newMount.Options, kv[0]) + case "tmpfs-mode": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) + case "tmpfs-size": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) + case "src", "source": + return newMount, errors.Errorf("source is not supported with tmpfs mounts") + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, err + } + newMount.Destination = unixPathClean(kv[1]) + setDest = true + case "U", "chown": + if setOwnership { + return newMount, errors.Wrapf(optionArgError, "cannot pass 'U' or 'chown' option more than once") + } + ok, err := validChownFlag(val) + if err != nil { + return newMount, err + } + if ok { + newMount.Options = append(newMount.Options, "U") + } + setOwnership = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. 
+ continue + default: + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0]) + } + } + + if !setDest { + return newMount, noDestError + } + + return newMount, nil +} + +// Parse a single devpts mount entry from the --mount flag +func getDevptsMount(args []string) (spec.Mount, error) { + newMount := spec.Mount{ + Type: define.TypeDevpts, + Source: define.TypeDevpts, + } + + var setDest bool + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "uid", "gid", "mode", "ptxmode", "newinstance", "max": + newMount.Options = append(newMount.Options, val) + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, errors.Wrapf(optionArgError, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, err + } + newMount.Destination = unixPathClean(kv[1]) + setDest = true + default: + return newMount, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0]) + } + } + + if !setDest { + return newMount, noDestError + } + + return newMount, nil +} + +// Parse a single volume mount entry from the --mount flag. +// Note that the volume-label option for named volumes is currently NOT supported. +// TODO: add support for --volume-label +func getNamedVolume(args []string) (*specgen.NamedVolume, error) { + newVolume := new(specgen.NamedVolume) + + var setDest, setRORW, setSuid, setDev, setExec, setOwnership bool + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "volume-opt": + newVolume.Options = append(newVolume.Options, val) + case "ro", "rw": + if setRORW { + return nil, errors.Wrapf(optionArgError, "cannot pass 'ro' and 'rw' options more than once") + } + setRORW = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "nosuid", "suid": + if setSuid { + return nil, errors.Wrapf(optionArgError, "cannot pass 'nosuid' and 'suid' options more than once") + } + setSuid = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "nodev", "dev": + if setDev { + return nil, errors.Wrapf(optionArgError, "cannot pass 'nodev' and 'dev' options more than once") + } + setDev = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "noexec", "exec": + if setExec { + return nil, errors.Wrapf(optionArgError, "cannot pass 'noexec' and 'exec' options more than once") + } + setExec = true + newVolume.Options = append(newVolume.Options, kv[0]) + case "volume-label": + return nil, errors.Errorf("the --volume-label option is not presently implemented") + case "src", "source": + if len(kv) == 1 { + return nil, errors.Wrapf(optionArgError, kv[0]) + } + newVolume.Name = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return nil, errors.Wrapf(optionArgError, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return nil, err + } + newVolume.Dest = unixPathClean(kv[1]) + setDest = true + case "U", "chown": + if setOwnership { + return newVolume, errors.Wrapf(optionArgError, "cannot pass 'U' or 'chown' option more than once") + } + ok, err := validChownFlag(val) + if err != nil { + return newVolume, err + } + if ok { + newVolume.Options = append(newVolume.Options, "U") + } + setOwnership = true + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. 
+ continue + default: + return nil, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0]) + } + } + + if !setDest { + return nil, noDestError + } + + return newVolume, nil +} + +// Parse the arguments into an image volume. An image volume is a volume based +// on a container image. The container image is first mounted on the host and +// is then bind-mounted into the container. An ImageVolume is always mounted +// read only. +func getImageVolume(args []string) (*specgen.ImageVolume, error) { + newVolume := new(specgen.ImageVolume) + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "src", "source": + if len(kv) == 1 { + return nil, errors.Wrapf(optionArgError, kv[0]) + } + newVolume.Source = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return nil, errors.Wrapf(optionArgError, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return nil, err + } + newVolume.Destination = unixPathClean(kv[1]) + case "rw", "readwrite": + switch kv[1] { + case "true": + newVolume.ReadWrite = true + case "false": + // Nothing to do. RO is default. + default: + return nil, errors.Wrapf(util.ErrBadMntOption, "invalid rw value %q", kv[1]) + } + case "consistency": + // Often used on MACs and mistakenly on Linux platforms. + // Since Docker ignores this option so shall we. + continue + default: + return nil, errors.Wrapf(util.ErrBadMntOption, "%s", kv[0]) + } + } + + if len(newVolume.Source)*len(newVolume.Destination) == 0 { + return nil, errors.Errorf("must set source and destination for image volume") + } + + return newVolume, nil +} + +// GetTmpfsMounts creates spec.Mount structs for user-requested tmpfs mounts +func getTmpfsMounts(tmpfsFlag []string) (map[string]spec.Mount, error) { + m := make(map[string]spec.Mount) + for _, i := range tmpfsFlag { + // Default options if nothing passed + var options []string + spliti := strings.Split(i, ":") + destPath := spliti[0] + if err := parse.ValidateVolumeCtrDir(spliti[0]); err != nil { + return nil, err + } + if len(spliti) > 1 { + options = strings.Split(spliti[1], ",") + } + + if _, ok := m[destPath]; ok { + return nil, errors.Wrapf(errDuplicateDest, destPath) + } + + mount := spec.Mount{ + Destination: unixPathClean(destPath), + Type: define.TypeTmpfs, + Options: options, + Source: define.TypeTmpfs, + } + m[destPath] = mount + } + return m, nil +} + +// validChownFlag ensures that the U or chown flag is correctly used +func validChownFlag(flag string) (bool, error) { + kv := strings.SplitN(flag, "=", 2) + switch len(kv) { + case 1: + case 2: + // U=[true|false] + switch strings.ToLower(kv[1]) { + case "true": + case "false": + return false, nil + default: + return false, errors.Wrapf(optionArgError, "'U' or 'chown' must be set to true or false, instead received %q", kv[1]) + } + default: + return false, errors.Wrapf(optionArgError, "badly formatted option %q", flag) + } + + return true, nil +} + +// Use path instead of filepath to preserve Unix style paths on Windows +func unixPathClean(p string) string { + return path.Clean(p) +} diff --git a/vendor/github.com/containers/podman/v3/pkg/systemd/activation.go b/vendor/github.com/containers/podman/v4/pkg/systemd/activation.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/systemd/activation.go rename to vendor/github.com/containers/podman/v4/pkg/systemd/activation.go diff --git a/vendor/github.com/containers/podman/v3/pkg/systemd/dbus.go b/vendor/github.com/containers/podman/v4/pkg/systemd/dbus.go similarity 
index 86% rename from vendor/github.com/containers/podman/v3/pkg/systemd/dbus.go rename to vendor/github.com/containers/podman/v4/pkg/systemd/dbus.go index c49f537b6c8..6887a466ece 100644 --- a/vendor/github.com/containers/podman/v3/pkg/systemd/dbus.go +++ b/vendor/github.com/containers/podman/v4/pkg/systemd/dbus.go @@ -1,12 +1,13 @@ package systemd import ( + "context" "fmt" "os" "path/filepath" "strconv" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/pkg/rootless" "github.com/coreos/go-systemd/v22/dbus" godbus "github.com/godbus/dbus/v5" "github.com/sirupsen/logrus" @@ -24,41 +25,40 @@ func IsSystemdSessionValid(uid int) bool { if rootless.IsRootless() { conn, err = GetLogindConnection(rootless.GetRootlessUID()) - object = conn.Object(dbusDest, godbus.ObjectPath(dbusPath)) if err != nil { - //unable to fetch systemd object for logind + // unable to fetch systemd object for logind logrus.Debugf("systemd-logind: %s", err) return false } object = conn.Object(dbusDest, godbus.ObjectPath(dbusPath)) if err := object.Call(dbusInterface+".GetSeat", 0, "seat0").Store(&seat0Path); err != nil { - //unable to get seat0 path. + // unable to get seat0 path. logrus.Debugf("systemd-logind: %s", err) return false } seat0Obj := conn.Object(dbusDest, seat0Path) activeSession, err := seat0Obj.GetProperty(dbusDest + ".Seat.ActiveSession") if err != nil { - //unable to get active sessions. + // unable to get active sessions. logrus.Debugf("systemd-logind: %s", err) return false } activeSessionMap, ok := activeSession.Value().([]interface{}) if !ok || len(activeSessionMap) < 2 { - //unable to get active session map. + // unable to get active session map. logrus.Debugf("systemd-logind: %s", err) return false } activeSessionPath, ok := activeSessionMap[1].(godbus.ObjectPath) if !ok { - //unable to fetch active session path. + // unable to fetch active session path. logrus.Debugf("systemd-logind: %s", err) return false } activeSessionObj := conn.Object(dbusDest, activeSessionPath) sessionUser, err := activeSessionObj.GetProperty(dbusDest + ".Session.User") if err != nil { - //unable to fetch session user from activeSession path. + // unable to fetch session user from activeSession path. logrus.Debugf("systemd-logind: %s", err) return false } @@ -75,7 +75,7 @@ func IsSystemdSessionValid(uid int) bool { if !ok { return false } - //active session found which belongs to following rootless user + // active session found which belongs to following rootless user if activeUID == uint32(uid) { return true } @@ -84,7 +84,7 @@ func IsSystemdSessionValid(uid int) bool { return true } -// GetDbusConnection returns an user connection to D-BUS +// GetDbusConnection returns a user connection to D-BUS func GetLogindConnection(uid int) (*godbus.Conn, error) { return dbusAuthConnectionLogind(uid) } @@ -141,5 +141,5 @@ func ConnectToDBUS() (*dbus.Conn, error) { if rootless.IsRootless() { return newRootlessConnection() } - return dbus.NewSystemdConnection() + return dbus.NewSystemdConnectionContext(context.Background()) } diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/define/const.go b/vendor/github.com/containers/podman/v4/pkg/systemd/define/const.go new file mode 100644 index 00000000000..6bab8b62990 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/systemd/define/const.go @@ -0,0 +1,14 @@ +package define + +const ( + // Default restart policy for generated unit files. 
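+ // podman generate systemd uses this value when the user does not pass
+ // an explicit --restart-policy.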
+ DefaultRestartPolicy = "on-failure" + + // EnvVariable "PODMAN_SYSTEMD_UNIT" is set in all generated systemd units and + // is set to the unit's (unique) name. + EnvVariable = "PODMAN_SYSTEMD_UNIT" +) + +// RestartPolicies includes all valid restart policies to be used in a unit +// file. +var RestartPolicies = []string{"no", "on-success", "on-failure", "on-abnormal", "on-watchdog", "on-abort", "always"} diff --git a/vendor/github.com/containers/podman/v3/pkg/timetype/timestamp.go b/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go similarity index 96% rename from vendor/github.com/containers/podman/v3/pkg/timetype/timestamp.go rename to vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go index 2de1a005fe9..5e9c6a1591d 100644 --- a/vendor/github.com/containers/podman/v3/pkg/timetype/timestamp.go +++ b/vendor/github.com/containers/podman/v4/pkg/timetype/timestamp.go @@ -34,13 +34,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - if strings.Contains(value, ".") { // nolint(gocritic) + switch { + case strings.Contains(value, "."): if parseInLocation { format = rFC3339NanoLocal } else { format = time.RFC3339Nano } - } else if strings.Contains(value, "T") { + case strings.Contains(value, "T"): // we want the number of colons in the T portion of the timestamp tcolons := strings.Count(value, ":") // if parseInLocation is off and we have a +/- zone offset (not Z) then @@ -68,9 +69,9 @@ func GetTimestamp(value string, reference time.Time) (string, error) { format = time.RFC3339 } } - } else if parseInLocation { + case parseInLocation: format = dateLocal - } else { + default: format = dateWithZone } diff --git a/vendor/github.com/containers/podman/v3/pkg/trust/config.go b/vendor/github.com/containers/podman/v4/pkg/trust/config.go similarity index 76% rename from vendor/github.com/containers/podman/v3/pkg/trust/config.go rename to vendor/github.com/containers/podman/v4/pkg/trust/config.go index 164df2a9083..6186d4cbd19 100644 --- a/vendor/github.com/containers/podman/v3/pkg/trust/config.go +++ b/vendor/github.com/containers/podman/v4/pkg/trust/config.go @@ -2,11 +2,11 @@ package trust // Policy describes a basic trust policy configuration type Policy struct { - Name string `json:"name"` + Transport string `json:"transport"` + Name string `json:"name,omitempty"` RepoName string `json:"repo_name,omitempty"` Keys []string `json:"keys,omitempty"` - SignatureStore string `json:"sigstore"` - Transport string `json:"transport"` + SignatureStore string `json:"sigstore,omitempty"` Type string `json:"type"` GPGId string `json:"gpg_id,omitempty"` } diff --git a/vendor/github.com/containers/podman/v3/pkg/trust/trust.go b/vendor/github.com/containers/podman/v4/pkg/trust/trust.go similarity index 99% rename from vendor/github.com/containers/podman/v3/pkg/trust/trust.go rename to vendor/github.com/containers/podman/v4/pkg/trust/trust.go index 584d1fa0278..1d0cc61babf 100644 --- a/vendor/github.com/containers/podman/v3/pkg/trust/trust.go +++ b/vendor/github.com/containers/podman/v4/pkg/trust/trust.go @@ -21,7 +21,7 @@ import ( // PolicyContent struct for policy.json file type PolicyContent struct { Default []RepoContent `json:"default"` - Transports TransportsContent `json:"transports"` + Transports TransportsContent `json:"transports,omitempty"` } // RepoContent struct used under 
each repo diff --git a/vendor/github.com/containers/podman/v3/pkg/util/filters.go b/vendor/github.com/containers/podman/v4/pkg/util/filters.go similarity index 85% rename from vendor/github.com/containers/podman/v3/pkg/util/filters.go rename to vendor/github.com/containers/podman/v4/pkg/util/filters.go index e252c1ddf77..05ba4f82cdb 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/filters.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/filters.go @@ -4,10 +4,11 @@ import ( "encoding/json" "fmt" "net/http" + "path/filepath" "strings" "time" - "github.com/containers/podman/v3/pkg/timetype" + "github.com/containers/podman/v4/pkg/timetype" "github.com/pkg/errors" ) @@ -94,6 +95,17 @@ func PrepareFilters(r *http.Request) (*map[string][]string, error) { return &filterMap, nil } +func matchPattern(pattern string, value string) bool { + if strings.Contains(pattern, "*") { + filter := fmt.Sprintf("*%s*", pattern) + filter = strings.ReplaceAll(filter, string(filepath.Separator), "|") + newName := strings.ReplaceAll(value, string(filepath.Separator), "|") + match, _ := filepath.Match(filter, newName) + return match + } + return false +} + // MatchLabelFilters matches labels and returns true if they are valid func MatchLabelFilters(filterValues []string, labels map[string]string) bool { outer: @@ -106,7 +118,7 @@ outer: filterValue = "" } for labelKey, labelValue := range labels { - if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) { + if ((labelKey == filterKey) || matchPattern(filterKey, labelKey)) && (filterValue == "" || labelValue == filterValue) { continue outer } } diff --git a/vendor/github.com/containers/podman/v3/pkg/util/kube.go b/vendor/github.com/containers/podman/v4/pkg/util/kube.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/util/kube.go rename to vendor/github.com/containers/podman/v4/pkg/util/kube.go diff --git a/vendor/github.com/containers/podman/v3/pkg/util/mountOpts.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go similarity index 80% rename from vendor/github.com/containers/podman/v3/pkg/util/mountOpts.go rename to vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go index f13dc94ecbc..e37394619e3 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/mountOpts.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go @@ -25,18 +25,41 @@ type defaultMountOptions struct { // The sourcePath variable, if not empty, contains a bind mount source. 
func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { var ( - foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU bool + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap bool ) newOptions := make([]string, 0, len(options)) for _, opt := range options { // Some options have parameters - size, mode splitOpt := strings.SplitN(opt, "=", 2) + + // add advanced options such as upperdir=/path and workdir=/path, when overlay is specified + if foundOverlay { + if strings.Contains(opt, "upperdir") { + newOptions = append(newOptions, opt) + continue + } + if strings.Contains(opt, "workdir") { + newOptions = append(newOptions, opt) + continue + } + } + + if strings.HasPrefix(splitOpt[0], "idmap") { + if foundIdmap { + return nil, errors.Wrapf(ErrDupeMntOption, "the 'idmap' option can only be set once") + } + foundIdmap = true + newOptions = append(newOptions, opt) + continue + } + switch splitOpt[0] { case "O": - if len(options) > 1 { - return nil, errors.Wrapf(ErrDupeMntOption, "'O' option can not be used with other options") - } + foundOverlay = true + case "volume-opt": + // Volume-opt should be relayed and processed by driver. + newOptions = append(newOptions, opt) case "exec", "noexec": if foundExec { return nil, errors.Wrapf(ErrDupeMntOption, "only one of 'noexec' and 'exec' can be used") @@ -155,3 +178,15 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string return newOptions, nil } + +func ParseDriverOpts(option string) (string, string, error) { + token := strings.SplitN(option, "=", 2) + if len(token) != 2 { + return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts") + } + opt := strings.SplitN(token[1], "=", 2) + if len(opt) != 2 { + return "", "", errors.Wrapf(ErrBadMntOption, "cannot parse driver opts") + } + return opt[0], opt[1], nil +} diff --git a/vendor/github.com/containers/podman/v3/pkg/util/mountOpts_linux.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go similarity index 100% rename from vendor/github.com/containers/podman/v3/pkg/util/mountOpts_linux.go rename to vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go diff --git a/vendor/github.com/containers/podman/v3/pkg/util/mountOpts_other.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go similarity index 87% rename from vendor/github.com/containers/podman/v3/pkg/util/mountOpts_other.go rename to vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go index 6a34942e54f..64b4dd1d949 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/mountOpts_other.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package util diff --git a/vendor/github.com/containers/podman/v3/pkg/util/utils.go b/vendor/github.com/containers/podman/v4/pkg/util/utils.go similarity index 85% rename from vendor/github.com/containers/podman/v3/pkg/util/utils.go rename to vendor/github.com/containers/podman/v4/pkg/util/utils.go index 208d815d9af..a0bf8b50d11 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/utils.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils.go @@ -3,6 +3,7 @@ package util import ( "encoding/json" "fmt" + "io/fs" "math" "os" "os/user" @@ -17,17 +18,17 @@ import ( "github.com/BurntSushi/toml" "github.com/containers/common/pkg/config" 
"github.com/containers/image/v5/types" - "github.com/containers/podman/v3/pkg/errorhandling" - "github.com/containers/podman/v3/pkg/namespaces" - "github.com/containers/podman/v3/pkg/rootless" - "github.com/containers/podman/v3/pkg/signal" + "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v4/pkg/namespaces" + "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/signal" "github.com/containers/storage/pkg/idtools" stypes "github.com/containers/storage/types" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) var containerConfig *config.Config @@ -64,7 +65,7 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { } if password == "" { fmt.Print("Password: ") - termPassword, err := terminal.ReadPassword(0) + termPassword, err := term.ReadPassword(0) if err != nil { return nil, errors.Wrapf(err, "could not read password from terminal") } @@ -346,55 +347,84 @@ func ParseSignal(rawSignal string) (syscall.Signal, error) { // GetKeepIDMapping returns the mappings and the user to use when keep-id is used func GetKeepIDMapping() (*stypes.IDMappingOptions, int, int, error) { + if !rootless.IsRootless() { + return nil, -1, -1, errors.New("keep-id is only supported in rootless mode") + } options := stypes.IDMappingOptions{ - HostUIDMapping: true, - HostGIDMapping: true, + HostUIDMapping: false, + HostGIDMapping: false, } - uid, gid := 0, 0 - if rootless.IsRootless() { - min := func(a, b int) int { - if a < b { - return a - } - return b + min := func(a, b int) int { + if a < b { + return a } + return b + } - uid = rootless.GetRootlessUID() - gid = rootless.GetRootlessGID() - - uids, gids, err := rootless.GetConfiguredMappings() - if err != nil { - return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") - } - maxUID, maxGID := 0, 0 - for _, u := range uids { - maxUID += u.Size - } - for _, g := range gids { - maxGID += g.Size - } + uid := rootless.GetRootlessUID() + gid := rootless.GetRootlessGID() - options.UIDMap, options.GIDMap = nil, nil + uids, gids, err := rootless.GetConfiguredMappings() + if err != nil { + return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") + } + if len(uids) == 0 || len(gids) == 0 { + return nil, -1, -1, errors.Wrapf(err, "keep-id requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly") + } + maxUID, maxGID := 0, 0 + for _, u := range uids { + maxUID += u.Size + } + for _, g := range gids { + maxGID += g.Size + } - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) - if maxUID > uid { - options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) - } + options.UIDMap, options.GIDMap = nil, nil - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) - if maxGID > gid { - options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) - } + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(uid, maxUID)}) + options.UIDMap = 
append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: 0, Size: 1}) + if maxUID > uid { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid + 1, HostID: uid + 1, Size: maxUID - uid}) + } - options.HostUIDMapping = false - options.HostGIDMapping = false + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: 0, HostID: 1, Size: min(gid, maxGID)}) + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: 0, Size: 1}) + if maxGID > gid { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid + 1, HostID: gid + 1, Size: maxGID - gid}) } - // Simply ignore the setting and do not setup an inner namespace for root as it is a no-op + return &options, uid, gid, nil } +// GetNoMapMapping returns the mappings and the user to use when nomap is used +func GetNoMapMapping() (*stypes.IDMappingOptions, int, int, error) { + if !rootless.IsRootless() { + return nil, -1, -1, errors.New("nomap is only supported in rootless mode") + } + options := stypes.IDMappingOptions{ + HostUIDMapping: false, + HostGIDMapping: false, + } + uids, gids, err := rootless.GetConfiguredMappings() + if err != nil { + return nil, -1, -1, errors.Wrapf(err, "cannot read mappings") + } + if len(uids) == 0 || len(gids) == 0 { + return nil, -1, -1, errors.New("nomap requires additional UIDs or GIDs defined in /etc/subuid and /etc/subgid to function correctly") + } + options.UIDMap, options.GIDMap = nil, nil + uid, gid := 0, 0 + for _, u := range uids { + options.UIDMap = append(options.UIDMap, idtools.IDMap{ContainerID: uid, HostID: uid + 1, Size: u.Size}) + uid += u.Size + } + for _, g := range gids { + options.GIDMap = append(options.GIDMap, idtools.IDMap{ContainerID: gid, HostID: gid + 1, Size: g.Size}) + gid += g.Size + } + return &options, 0, 0, nil +} + // ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*stypes.IDMappingOptions, error) { options := stypes.IDMappingOptions{ @@ -414,7 +444,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin options.AutoUserNsOpts = *opts return &options, nil } - if mode.IsKeepID() { + if mode.IsKeepID() || mode.IsNoMap() { options.HostUIDMapping = false options.HostGIDMapping = false return &options, nil @@ -656,7 +686,7 @@ func CreateCidFile(cidfile string, id string) error { if os.IsExist(err) { return errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", cidfile) } - return errors.Errorf("error opening cidfile %s", cidfile) + return errors.Errorf("opening cidfile %s", cidfile) } if _, err = cidFile.WriteString(id); err != nil { logrus.Error(err) @@ -665,8 +695,8 @@ func CreateCidFile(cidfile string, id string) error { return nil } -// DefaultCPUPeriod is the default CPU period is 100us, which is the same default -// as Kubernetes. +// DefaultCPUPeriod is the default CPU period (100ms) in microseconds, which is +// the same default as Kubernetes.
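// For example, with this 100000µs period a request for half a core corresponds
// to a quota of 50000µs and 2.5 cores to 250000µs (quota = cores × period);
// that conversion is what CoresToPeriodAndQuota below performs.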
const DefaultCPUPeriod uint64 = 100000 // CoresToPeriodAndQuota converts a fraction of cores to the equivalent @@ -701,25 +731,27 @@ func IDtoolsToRuntimeSpec(idMaps []idtools.IDMap) (convertedIDMap []specs.LinuxI return convertedIDMap } -var socketPath string - -func SetSocketPath(path string) { - socketPath = path -} - -func SocketPath() (string, error) { - if socketPath != "" { - return socketPath, nil - } - xdg, err := GetRuntimeDir() - if err != nil { - return "", err - } - if len(xdg) == 0 { - // If no xdg is returned, assume root socket - xdg = "/run" +func LookupUser(name string) (*user.User, error) { + // Assume UID look up first, if it fails lookup by username + if u, err := user.LookupId(name); err == nil { + return u, nil } + return user.Lookup(name) +} - // Glue the socket path together - return filepath.Join(xdg, "podman", "podman.sock"), nil +// SizeOfPath determines the file usage of a given path. it was called volumeSize in v1 +// and now is made to be generic and take a path instead of a libpod volume +func SizeOfPath(path string) (uint64, error) { + var size uint64 + err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { + if err == nil && !d.IsDir() { + info, err := d.Info() + if err != nil { + return err + } + size += uint64(info.Size()) + } + return err + }) + return size, err } diff --git a/vendor/github.com/containers/podman/v3/pkg/util/utils_darwin.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go similarity index 84% rename from vendor/github.com/containers/podman/v3/pkg/util/utils_darwin.go rename to vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go index 33a46a5d4f8..66ae85e9cc9 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/utils_darwin.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go @@ -1,4 +1,5 @@ -//+build darwin +//go:build darwin +// +build darwin package util diff --git a/vendor/github.com/containers/podman/v3/pkg/util/utils_linux.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go similarity index 74% rename from vendor/github.com/containers/podman/v3/pkg/util/utils_linux.go rename to vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go index 288137ca599..0b21bf3c537 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/utils_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go @@ -2,6 +2,7 @@ package util import ( "fmt" + "io/fs" "os" "path/filepath" "syscall" @@ -23,24 +24,30 @@ func GetContainerPidInformationDescriptors() ([]string, error) { // Symlinks to nodes are ignored. func FindDeviceNodes() (map[string]string, error) { nodes := make(map[string]string) - err := filepath.Walk("/dev", func(path string, info os.FileInfo, err error) error { + err := filepath.WalkDir("/dev", func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Warnf("Error descending into path %s: %v", path, err) return filepath.SkipDir } // If we aren't a device node, do nothing. - if info.Mode()&(os.ModeDevice|os.ModeCharDevice) == 0 { + if d.Type()&(os.ModeDevice|os.ModeCharDevice) == 0 { return nil } + info, err := d.Info() + if err != nil { + return err + } // We are a device node. Get major/minor. 
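// For instance, a Stat_t.Rdev of 0x0801 (typically /dev/sda1) decodes below to
// major 8 ((0x0801>>8)&0xfff) and minor 1 (0x0801&0xff), so the node is stored
// under the key "8:1"; the extra shifted terms only matter for devices whose
// major or minor numbers exceed the historical 8-bit fields.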
sysstat, ok := info.Sys().(*syscall.Stat_t) if !ok { return errors.Errorf("Could not convert stat output for use") } - major := sysstat.Rdev / 256 - minor := sysstat.Rdev % 256 + // We must typeconvert sysstat.Rdev from uint64->int to avoid constant overflow + rdev := int(sysstat.Rdev) + major := ((rdev >> 8) & 0xfff) | ((rdev >> 32) & ^0xfff) + minor := (rdev & 0xff) | ((rdev >> 12) & ^0xff) nodes[fmt.Sprintf("%d:%d", major, minor)] = path diff --git a/vendor/github.com/containers/podman/v3/pkg/util/utils_supported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go similarity index 98% rename from vendor/github.com/containers/podman/v3/pkg/util/utils_supported.go rename to vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go index 6eba0bc3c68..50e4b1b7b17 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/utils_supported.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package util @@ -11,7 +12,7 @@ import ( "path/filepath" "syscall" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/podman/v4/pkg/rootless" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v3/pkg/util/utils_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go similarity index 89% rename from vendor/github.com/containers/podman/v3/pkg/util/utils_unsupported.go rename to vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go index 62805d7c8dd..8963464933d 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/utils_unsupported.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go @@ -1,3 +1,4 @@ +//go:build darwin || windows // +build darwin windows package util diff --git a/vendor/github.com/containers/podman/v3/pkg/util/utils_windows.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go similarity index 98% rename from vendor/github.com/containers/podman/v3/pkg/util/utils_windows.go rename to vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go index 46ca5e7f18a..2732124f21f 100644 --- a/vendor/github.com/containers/podman/v3/pkg/util/utils_windows.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package util diff --git a/vendor/github.com/containers/podman/v3/utils/ports.go b/vendor/github.com/containers/podman/v4/utils/ports.go similarity index 92% rename from vendor/github.com/containers/podman/v3/utils/ports.go rename to vendor/github.com/containers/podman/v4/utils/ports.go index 0a4f67dcc13..57a6f82759e 100644 --- a/vendor/github.com/containers/podman/v3/utils/ports.go +++ b/vendor/github.com/containers/podman/v4/utils/ports.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" ) -// Find a random, open port on the host +// Find a random, open port on the host. 
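// A minimal usage sketch (caller code is illustrative):
//
//	port, err := GetRandomPort()
//	if err != nil {
//		log.Fatal(err)
//	}
//	addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(port))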
func GetRandomPort() (int, error) { l, err := net.Listen("tcp", ":0") if err != nil { diff --git a/vendor/github.com/containers/podman/v3/utils/utils.go b/vendor/github.com/containers/podman/v4/utils/utils.go similarity index 69% rename from vendor/github.com/containers/podman/v3/utils/utils.go rename to vendor/github.com/containers/podman/v4/utils/utils.go index b08630d2f7e..d0e3dbb462c 100644 --- a/vendor/github.com/containers/podman/v3/utils/utils.go +++ b/vendor/github.com/containers/podman/v4/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "bytes" + "crypto/rand" "fmt" "io" "io/ioutil" @@ -11,15 +12,16 @@ import ( "strings" "sync" - "github.com/containers/podman/v3/libpod/define" - "github.com/containers/podman/v3/pkg/cgroups" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/libpod/define" "github.com/containers/storage/pkg/archive" + "github.com/godbus/dbus/v5" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // ExecCmd executes a command with args and returns its output as a string along -// with an error, if any +// with an error, if any. func ExecCmd(name string, args ...string) (string, error) { cmd := exec.Command(name, args...) var stdout bytes.Buffer @@ -41,9 +43,7 @@ func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, env []stri cmd.Stdin = stdin cmd.Stdout = stdout cmd.Stderr = stderr - if env != nil { - cmd.Env = env - } + cmd.Env = env err := cmd.Run() if err != nil { @@ -174,25 +174,66 @@ func RunsOnSystemd() bool { return runsOnSystemd } -func moveProcessToScope(pidPath, slice, scope string) error { +func moveProcessPIDFileToScope(pidPath, slice, scope string) error { data, err := ioutil.ReadFile(pidPath) if err != nil { + // do not raise an error if the file doesn't exist + if os.IsNotExist(err) { + return nil + } return errors.Wrapf(err, "cannot read pid file %s", pidPath) } pid, err := strconv.ParseUint(string(data), 10, 0) if err != nil { return errors.Wrapf(err, "cannot parse pid file %s", pidPath) } - return RunUnderSystemdScope(int(pid), slice, scope) + + return moveProcessToScope(int(pid), slice, scope) +} + +func moveProcessToScope(pid int, slice, scope string) error { + err := RunUnderSystemdScope(int(pid), slice, scope) + // If the PID is not valid anymore, do not return an error. + if dbusErr, ok := err.(dbus.Error); ok { + if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { + return nil + } + } + return err +} + +// MoveRootlessNetnsSlirpProcessToUserSlice moves the slirp4netns process for the rootless netns +// into a different scope so that systemd does not kill it with a container. +func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { + randBytes := make([]byte, 4) + _, err := rand.Read(randBytes) + if err != nil { + return err + } + return moveProcessToScope(pid, "user.slice", fmt.Sprintf("rootless-netns-%x.scope", randBytes)) } // MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to // a separate scope. 
func MovePauseProcessToScope(pausePidPath string) { - err := moveProcessToScope(pausePidPath, "user.slice", "podman-pause.scope") - if err != nil { - unified, err := cgroups.IsCgroup2UnifiedMode() + var err error + + for i := 0; i < 10; i++ { + randBytes := make([]byte, 4) + _, err = rand.Read(randBytes) if err != nil { + logrus.Errorf("failed to read random bytes: %v", err) + continue + } + err = moveProcessPIDFileToScope(pausePidPath, "user.slice", fmt.Sprintf("podman-pause-%x.scope", randBytes)) + if err == nil { + return + } + } + + if err != nil { + unified, err2 := cgroups.IsCgroup2UnifiedMode() + if err2 != nil { logrus.Warnf("Failed to detect if running with cgroup unified: %v", err) } if RunsOnSystemd() && unified { @@ -202,3 +243,27 @@ func MovePauseProcessToScope(pausePidPath string) { } } } + +// CreateSCPCommand takes an existing command, appends the given arguments and returns a configured podman command for image scp +func CreateSCPCommand(cmd *exec.Cmd, command []string) *exec.Cmd { + cmd.Args = append(cmd.Args, command...) + cmd.Env = os.Environ() + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + return cmd +} + +// LoginUser starts the user process on the host so that image scp can use systemd-run +func LoginUser(user string) (*exec.Cmd, error) { + sleep, err := exec.LookPath("sleep") + if err != nil { + return nil, err + } + machinectl, err := exec.LookPath("machinectl") + if err != nil { + return nil, err + } + cmd := exec.Command(machinectl, "shell", "-q", user+"@.host", sleep, "inf") + err = cmd.Start() + return cmd, err +} diff --git a/vendor/github.com/containers/podman/v3/utils/utils_supported.go b/vendor/github.com/containers/podman/v4/utils/utils_supported.go similarity index 92% rename from vendor/github.com/containers/podman/v3/utils/utils_supported.go rename to vendor/github.com/containers/podman/v4/utils/utils_supported.go index 1404e319416..493ea61ce4e 100644 --- a/vendor/github.com/containers/podman/v3/utils/utils_supported.go +++ b/vendor/github.com/containers/podman/v4/utils/utils_supported.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package utils @@ -5,14 +6,15 @@ package utils import ( "bufio" "bytes" + "context" "fmt" "io/ioutil" "os" "path/filepath" "strings" - "github.com/containers/podman/v3/pkg/cgroups" - "github.com/containers/podman/v3/pkg/rootless" + "github.com/containers/common/pkg/cgroups" + "github.com/containers/podman/v4/pkg/rootless" systemdDbus "github.com/coreos/go-systemd/v22/dbus" "github.com/godbus/dbus/v5" "github.com/pkg/errors" @@ -31,7 +33,7 @@ func RunUnderSystemdScope(pid int, slice string, unitName string) error { return err } } else { - conn, err = systemdDbus.New() + conn, err = systemdDbus.NewWithContext(context.Background()) if err != nil { return err } @@ -42,10 +44,10 @@ func RunUnderSystemdScope(pid int, slice string, unitName string) error { properties = append(properties, newProp("Delegate", true)) properties = append(properties, newProp("DefaultDependencies", false)) ch := make(chan string) - _, err = conn.StartTransientUnit(unitName, "replace", properties, ch) + _, err = conn.StartTransientUnitContext(context.Background(), unitName, "replace", properties, ch) if err != nil { // On errors check if the cgroup already exists, if it does move the process there - if props, err := conn.GetUnitTypeProperties(unitName, "Scope"); err == nil { + if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil { if cgroup, ok := props["ControlGroup"].(string); 
ok && cgroup != "" { if err := moveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil { return nil diff --git a/vendor/github.com/containers/podman/v3/utils/utils_windows.go b/vendor/github.com/containers/podman/v4/utils/utils_windows.go similarity index 96% rename from vendor/github.com/containers/podman/v3/utils/utils_windows.go rename to vendor/github.com/containers/podman/v4/utils/utils_windows.go index 1a2196029bc..2c159ab06db 100644 --- a/vendor/github.com/containers/podman/v3/utils/utils_windows.go +++ b/vendor/github.com/containers/podman/v4/utils/utils_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package utils diff --git a/vendor/github.com/containers/podman/v3/version/version.go b/vendor/github.com/containers/podman/v4/version/version.go similarity index 93% rename from vendor/github.com/containers/podman/v3/version/version.go rename to vendor/github.com/containers/podman/v4/version/version.go index c6bd2c2392b..6555a5857ad 100644 --- a/vendor/github.com/containers/podman/v3/version/version.go +++ b/vendor/github.com/containers/podman/v4/version/version.go @@ -27,7 +27,7 @@ const ( // NOTE: remember to bump the version at the top // of the top-level README.md file when this is // bumped. -var Version = semver.MustParse("4.0.0-dev") +var Version = semver.MustParse("4.1.0") // See https://docs.docker.com/engine/api/v1.40/ // libpod compat handlers are expected to honor docker API versions @@ -38,7 +38,7 @@ var Version = semver.MustParse("4.0.0-dev") var APIVersion = map[Tree]map[Level]semver.Version{ Libpod: { CurrentAPI: Version, - MinimalAPI: semver.MustParse("3.1.0"), + MinimalAPI: semver.MustParse("4.0.0"), }, Compat: { CurrentAPI: semver.MustParse("1.40.0"), diff --git a/vendor/github.com/containers/psgo/internal/proc/ns.go b/vendor/github.com/containers/psgo/internal/proc/ns.go index 28ee6a2c96d..9e77b865b39 100644 --- a/vendor/github.com/containers/psgo/internal/proc/ns.go +++ b/vendor/github.com/containers/psgo/internal/proc/ns.go @@ -19,13 +19,9 @@ import ( "fmt" "io" "os" -) -type IDMap struct { - ContainerID int - HostID int - Size int -} + "github.com/containers/storage/pkg/idtools" +) // ParsePIDNamespace returns the content of /proc/$pid/ns/pid. 
func ParsePIDNamespace(pid string) (string, error) { @@ -46,14 +42,14 @@ func ParseUserNamespace(pid string) (string, error) { } // ReadMappings reads the user namespace mappings at the specified path -func ReadMappings(path string) ([]IDMap, error) { +func ReadMappings(path string) ([]idtools.IDMap, error) { file, err := os.Open(path) if err != nil { return nil, err } defer file.Close() - mappings := []IDMap{} + var mappings []idtools.IDMap buf := bufio.NewReader(file) for { @@ -68,10 +64,10 @@ func ReadMappings(path string) ([]IDMap, error) { return mappings, nil } - containerID, hostID, size := 0, 0, 0 + var containerID, hostID, size int if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil { return nil, fmt.Errorf("cannot parse %s: %w", string(line), err) } - mappings = append(mappings, IDMap{ContainerID: containerID, HostID: hostID, Size: size}) + mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}) } } diff --git a/vendor/github.com/containers/psgo/internal/proc/status.go b/vendor/github.com/containers/psgo/internal/proc/status.go index 1896b5c0784..1d2247cbd0c 100644 --- a/vendor/github.com/containers/psgo/internal/proc/status.go +++ b/vendor/github.com/containers/psgo/internal/proc/status.go @@ -18,8 +18,11 @@ import ( "bufio" "fmt" "os" - "os/exec" + "strconv" "strings" + "sync" + + "github.com/containers/storage/pkg/idtools" ) // Status is a direct translation of a `/proc/[pid]/status`, which provides much @@ -173,23 +176,8 @@ type Status struct { NonvoluntaryCtxtSwitches string } -// readStatusUserNS joins the user namespace of pid and returns the content of -// /proc/pid/status as a string slice. -func readStatusUserNS(pid string) ([]string, error) { - path := fmt.Sprintf("/proc/%s/status", pid) - args := []string{"nsenter", "-U", "-t", pid, "cat", path} - - c := exec.Command(args[0], args[1:]...) - output, err := c.CombinedOutput() - if err != nil { - return nil, fmt.Errorf("error executing %q: %w", strings.Join(args, " "), err) - } - - return strings.Split(string(output), "\n"), nil -} - -// readStatusDefault returns the content of /proc/pid/status as a string slice. -func readStatusDefault(pid string) ([]string, error) { +// readStatus returns the content of /proc/pid/status as a string slice. +func readStatus(pid string) ([]string, error) { path := fmt.Sprintf("/proc/%s/status", pid) f, err := os.Open(path) if err != nil { @@ -203,21 +191,81 @@ func readStatusDefault(pid string) ([]string, error) { return lines, nil } -// ParseStatus parses the /proc/$pid/status file and returns a *Status. -func ParseStatus(pid string, joinUserNS bool) (*Status, error) { - var lines []string - var err error +// mapField maps a single string-typed ID field given the set of mappings. If +// no mapping exists, the overflow uid/gid is used. 
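// For example (sketch): given the single mapping {ContainerID: 0, HostID: 100000,
// Size: 65536}, a host ID field "100000" is rewritten to "0" and "100999" to
// "999", while an ID outside every configured range falls back to the overflow
// value (usually "65534").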
+func mapStatusField(field *string, mapping []idtools.IDMap, overflow string) { + hostId, err := strconv.Atoi(*field) + if err != nil { + *field = overflow + return + } + contId, err := idtools.RawToContainer(hostId, mapping) + if err != nil { + *field = overflow + return + } + *field = strconv.Itoa(contId) +} - if joinUserNS { - lines, err = readStatusUserNS(pid) - } else { - lines, err = readStatusDefault(pid) +var ( + overflowOnce sync.Once + overflowUid = "65534" + overflowGid = "65534" +) + +func overflowIds() (string, string) { + overflowOnce.Do(func() { + if uid, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { + overflowUid = strings.TrimSpace(string(uid)) + } + if gid, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { + overflowGid = strings.TrimSpace(string(gid)) + } + }) + return overflowUid, overflowGid +} + +// mapStatus takes a Status struct and remaps all of the relevant fields to +// match the user namespace of the target process. +func mapStatus(pid string, status *Status) (*Status, error) { + uidMap, err := ReadMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) + if err != nil { + return nil, err + } + gidMap, err := ReadMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) + if err != nil { + return nil, err + } + overflowUid, overflowGid := overflowIds() + for i := range status.Uids { + mapStatusField(&status.Uids[i], uidMap, overflowUid) + } + for i := range status.Gids { + mapStatusField(&status.Gids[i], gidMap, overflowGid) } + for i := range status.Groups { + mapStatusField(&status.Groups[i], gidMap, overflowGid) + } + return status, nil +} +// ParseStatus parses the /proc/$pid/status file and returns a *Status. +func ParseStatus(pid string, mapUserNS bool) (*Status, error) { + lines, err := readStatus(pid) + if err != nil { + return nil, err + } + status, err := parseStatus(pid, lines) if err != nil { return nil, err } - return parseStatus(pid, lines) + if mapUserNS { + status, err = mapStatus(pid, status) + if err != nil { + return nil, err + } + } + return status, nil } // parseStatus extracts data from lines and returns a *Status. diff --git a/vendor/github.com/containers/psgo/psgo.go b/vendor/github.com/containers/psgo/psgo.go index ea893e7ca86..d6cfcef4dc0 100644 --- a/vendor/github.com/containers/psgo/psgo.go +++ b/vendor/github.com/containers/psgo/psgo.go @@ -41,28 +41,18 @@ import ( "github.com/containers/psgo/internal/dev" "github.com/containers/psgo/internal/proc" "github.com/containers/psgo/internal/process" + "github.com/containers/storage/pkg/idtools" "golang.org/x/sys/unix" ) -// IDMap specifies a mapping range from the host to the container IDs. -type IDMap struct { - // ContainerID is the first ID in the container. - ContainerID int - // HostID is the first ID in the host. - HostID int - // Size specifies how long is the range. e.g. 1 means a single user - // is mapped. - Size int -} - // JoinNamespaceOpts specifies different options for joining the specified namespaces. type JoinNamespaceOpts struct { // UIDMap specifies a mapping for UIDs in the container. If specified // huser will perform the reverse mapping. - UIDMap []IDMap + UIDMap []idtools.IDMap // GIDMap specifies a mapping for GIDs in the container. If specified // hgroup will perform the reverse mapping. - GIDMap []IDMap + GIDMap []idtools.IDMap // FillMappings specified whether UIDMap and GIDMap must be initialized // with the current user namespace. 
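The FillMappings path now feeds proc.ReadMappings straight into these fields. A minimal sketch of what that call yields, assuming a typical rootless /proc/self/uid_map; note the package is internal to psgo, so external callers only reach this through JoinNamespaceOpts{FillMappings: true}:

	package main

	import (
		"fmt"

		"github.com/containers/psgo/internal/proc"
	)

	func main() {
		// Each line of /proc/self/uid_map, e.g. "0 1000 1" then
		// "1 100000 65536" for a typical rootless user, becomes one
		// idtools.IDMap entry.
		mappings, err := proc.ReadMappings("/proc/self/uid_map")
		if err != nil {
			panic(err)
		}
		for _, m := range mappings {
			fmt.Printf("container %d -> host %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
		}
	}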
@@ -102,7 +92,7 @@ type aixFormatDescriptor struct { } // findID converts the specified id to the host mapping -func findID(idStr string, mapping []IDMap, lookupFunc func(uid string) (string, error), overflowFile string) (string, error) { +func findID(idStr string, mapping []idtools.IDMap, lookupFunc func(uid string) (string, error), overflowFile string) (string, error) { if len(mapping) == 0 { return idStr, nil } @@ -350,29 +340,16 @@ func JoinNamespaceAndProcessInfo(pid string, descriptors []string) ([][]string, return JoinNamespaceAndProcessInfoWithOptions(pid, descriptors, &JoinNamespaceOpts{}) } -func readMappings(path string) ([]IDMap, error) { - mappings, err := proc.ReadMappings(path) - if err != nil { - return nil, err - } - var res []IDMap - for _, i := range mappings { - m := IDMap{ContainerID: i.ContainerID, HostID: i.HostID, Size: i.Size} - res = append(res, m) - } - return res, nil -} - func contextFromOptions(options *JoinNamespaceOpts) (*psContext, error) { ctx := new(psContext) ctx.opts = options if ctx.opts != nil && ctx.opts.FillMappings { - uidMappings, err := readMappings("/proc/self/uid_map") + uidMappings, err := proc.ReadMappings("/proc/self/uid_map") if err != nil { return nil, err } - gidMappings, err := readMappings("/proc/self/gid_map") + gidMappings, err := proc.ReadMappings("/proc/self/gid_map") if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index d080d790c1a..fd3d310548d 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,17 +17,17 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" UBUNTU_NAME: "ubuntu-2104" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - _BUILT_IMAGE_SUFFIX: "c6431352024203264" - FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}" - PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}" - UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}" + IMAGE_SUFFIX: "c4512539143831552" + FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" + PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" + UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" #### #### Command variables to help avoid duplication @@ -117,7 +117,7 @@ lint_task: env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -132,7 +132,7 @@ lint_task: meta_task: container: - image: "quay.io/libpod/imgts:${_BUILT_IMAGE_SUFFIX}" + image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}" cpu: 1 memory: 1 @@ -154,7 +154,7 @@ meta_task: vendor_task: container: - image: golang:1.15 + image: golang:1.16 modules_cache: fingerprint_script: cat go.sum folder: $GOPATH/pkg/mod @@ -172,6 +172,6 @@ success_task: - meta - vendor container: - image: golang:1.15 + image: golang:1.16 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index dbc1f7c9987..244576d546a 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -51,13 +51,16 @@ sources := $(wildcard *.go 
cmd/containers-storage/*.go drivers/*.go drivers/*/*. containers-storage: $(sources) ## build using gc on the host $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + binary local-binary: containers-storage local-gccgo: ## build using gccgo on the host GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage -local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd - @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ +local-cross: ## cross build the binaries for arm, darwin, and freebsd + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ @@ -66,44 +69,44 @@ local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd done cross: ## cross build the binaries for arm, darwin, and\nfreebsd using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ docs: install.tools ## build the docs on the host $(MAKE) -C docs docs gccgo: ## build using gccgo using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ test: local-binary ## build the binaries and run the tests using VMs - $(RUNINVM) make local-binary local-cross local-test-unit local-test-integration + $(RUNINVM) $(MAKE) local-binary local-cross local-test-unit local-test-integration local-test-unit: local-binary ## run the unit tests on the host (requires\nsuperuser privileges) @$(GO) test $(MOD_VENDOR) $(BUILDFLAGS) $(TESTFLAGS) $(shell $(GO) list ./... | grep -v ^$(PACKAGE)/vendor) test-unit: local-binary ## run the unit tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-test-integration: local-binary ## run the integration tests on the host (requires\nsuperuser privileges) @cd tests; ./test_runner.bash test-integration: local-binary ## run the integration tests using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ local-validate: ## validate DCO and gofmt on the host @./hack/git-validation.sh @./hack/gofmt.sh validate: ## validate DCO, gofmt, ./pkg/ isolation, golint,\ngo vet and vendor using VMs - $(RUNINVM) make local-$@ + $(RUNINVM) $(MAKE) local-$@ install.tools: - make -C tests/tools + $(MAKE) -C tests/tools $(FFJSON): - make -C tests/tools + $(MAKE) -C tests/tools install.docs: docs - make -C docs install + $(MAKE) -C docs install install: install.docs diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index bf50e910e62..7d47e599800 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.37.0 +1.41.0 diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile deleted file mode 100644 index c82c1f81bd3..00000000000 --- a/vendor/github.com/containers/storage/Vagrantfile +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -# -# The fedora/28-cloud-base and debian/jessie64 boxes are also available for -# the "virtualbox" provider. 
Set the VAGRANT_PROVIDER environment variable to -# "virtualbox" to use them instead. -# -Vagrant.configure("2") do |config| - config.vm.define "fedora" do |c| - c.vm.box = "fedora/28-cloud-base" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end - config.vm.define "debian" do |c| - c.vm.box = "debian/jessie64" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end -end diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index b4f773f2b73..a8b20f03a01 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -84,8 +84,17 @@ type ContainerStore interface { // SetNames updates the list of names associated with the container // with the specified ID. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the container with + // the specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the container with + // the specified id. + RemoveNames(id string, names []string) error + // Get retrieves information about a container given an ID or name. Get(id string) (*Container, error) @@ -324,6 +333,12 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) } } + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err + } if err == nil { container = &Container{ ID: id, @@ -371,22 +386,40 @@ func (r *containerStore) removeName(container *Container, name string) { container.Names = stringSliceWithoutValue(container.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
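// Migration sketch (identifiers illustrative): callers that previously
// replaced the whole list, e.g.
//
//	store.SetNames(id, append(existing, "extra"))
//
// can use the atomic equivalents instead:
//
//	store.AddNames(id, []string{"extra"})
//	store.RemoveNames(id, []string{"stale"})
//
// which avoid the read-modify-write race noted above.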
func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) - if container, ok := r.lookup(id); ok { - for _, name := range container.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherContainer, ok := r.byname[name]; ok { - r.removeName(otherContainer, name) - } - r.byname[name] = container + return r.updateNames(id, names, setNames) +} + +func (r *containerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *containerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) } - container.Names = names - return r.Save() + r.byname[name] = container } - return ErrContainerUnknown + container.Names = names + return r.Save() } func (r *containerStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index a566fbffa0f..e66613c098a 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -26,6 +27,7 @@ import ( "bufio" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -649,11 +651,11 @@ func (a *Driver) mounted(mountpoint string) (bool, error) { // Cleanup aufs and unmount all mountpoints func (a *Driver) Cleanup() error { var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { if err != nil { return err } - if !info.IsDir() { + if !d.IsDir() { return nil } dirs = append(dirs, path) diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 3903b1dddd9..339aa0d3809 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package btrfs @@ -16,6 +17,7 @@ import "C" import ( "fmt" + "io/fs" "io/ioutil" "math" "os" @@ -256,7 +258,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { var args C.struct_btrfs_ioctl_vol_args // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { + walkSubvolumes := func(p string, d fs.DirEntry, err error) error { if err != nil { if os.IsNotExist(err) && p != fullPath { // missing most likely because the path was a subvolume that got removed in the previous iteration @@ -267,20 +269,20 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error { } // we want to check children only so skip itself // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { + if d.IsDir() && p != fullPath { sv, err := isSubvolume(p) if err != nil { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := 
subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { + if err := subvolDelete(path.Dir(p), d.Name(), quotaEnabled); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } } return nil } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + if err := filepath.WalkDir(path.Join(dirpath, name), walkSubvolumes); err != nil { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 63bfd2d136f..2db6764c91b 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -50,11 +50,14 @@ func chownByMapsMain() { if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { toHost = nil } + + chowner := newLChowner() + chown := func(path string, info os.FileInfo, _ error) error { if path == "." { return nil } - return platformLChown(path, info, toHost, toContainer) + return chowner.LChown(path, info, toHost, toContainer) } if err := pwalk.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 0387adfc123..c598b936d64 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package graphdriver @@ -6,17 +7,50 @@ import ( "errors" "fmt" "os" + "sync" "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { st, ok := info.Sys().(*syscall.Stat_t) if !ok { return nil } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + // Map an on-disk UID/GID pair from host to container // using the first map, then back to the host using the // second map. Skip that first step if they're 0, to @@ -42,7 +76,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. UID: uid, GID: gid, } - mappedPair, err := toHost.ToHost(pair) + mappedPair, err := toHost.ToHostOverflow(pair) if err != nil { return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) } @@ -50,7 +84,7 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools. 
} if uid != int(st.Uid) || gid != int(st.Gid) { cap, err := system.Lgetxattr(path, "security.capability") - if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { + if err != nil && !errors.Is(err, system.EOPNOTSUPP) && !errors.Is(err, system.EOVERFLOW) && err != system.ErrNotSupportedPlatform { return fmt.Errorf("%s: %v", os.Args[0], err) } diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 31bd5bb52dd..1845a4e086c 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package graphdriver @@ -9,6 +10,13 @@ import ( "github.com/containers/storage/pkg/idtools" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type platformChowner struct { +} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { return &os.PathError{"lchown", path, syscall.EWINDOWS} } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index c5168bfdd25..e604b7e3186 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -1,3 +1,4 @@ +//go:build linux && cgo // +build linux,cgo package devmapper @@ -6,6 +7,7 @@ import ( "bufio" "fmt" "io" + "io/fs" "io/ioutil" "os" "os/exec" @@ -419,40 +421,35 @@ func (devices *DeviceSet) constructDeviceIDMap() { } } -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { +func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { // Skip some of the meta files which are not device files. - if strings.HasSuffix(finfo.Name(), ".migrated") { + if strings.HasSuffix(name, ".migrated") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if strings.HasPrefix(finfo.Name(), ".") { + if strings.HasPrefix(name, ".") { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == deviceSetMetaFile { + if name == deviceSetMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } - if finfo.Name() == transactionMetaFile { + if name == transactionMetaFile { logrus.Debugf("devmapper: Skipping file %s", path) return nil } logrus.Debugf("devmapper: Loading data for file %s", path) - hash := finfo.Name() - if hash == base { - hash = "" - } - // Include deleted devices also as cleanup delete device logic // will go through it and see if there are any deleted devices. 
- if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + if _, err := devices.lookupDevice(name); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", name, err) } return nil @@ -462,21 +459,21 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { logrus.Debug("devmapper: loadDeviceFilesOnStart()") defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - var scan = func(path string, info os.FileInfo, err error) error { + var scan = func(path string, d fs.DirEntry, err error) error { if err != nil { logrus.Debugf("devmapper: Can't walk the file %s", path) return nil } // Skip any directories - if info.IsDir() { + if d.IsDir() { return nil } - return devices.deviceFileWalkFunction(path, info) + return devices.deviceFileWalkFunction(path, d.Name()) } - return filepath.Walk(devices.metadataDir(), scan) + return filepath.WalkDir(devices.metadataDir(), scan) } // Should be called with devices.Lock() held. diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go index e1320ee07f6..143cccf92ec 100644 --- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go +++ b/vendor/github.com/containers/storage/drivers/driver_freebsd.go @@ -2,15 +2,43 @@ package graphdriver import ( "golang.org/x/sys/unix" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( // Slice of drivers that should be used in an order priority = []string{ "zfs", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", } ) +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on FreeBSD. 
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { var buf unix.Statfs_t diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index a534630df08..b7e681ace45 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -138,6 +138,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if parent != "" { options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } parentFs, err = driver.Get(parent, options) if err != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go index 44b3515a854..48fb7a550fa 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ b/vendor/github.com/containers/storage/drivers/overlay/check.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -11,6 +12,7 @@ import ( "syscall" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/system" @@ -218,3 +220,55 @@ func doesVolatile(d string) (bool, error) { }() return true, nil } + +// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of +// a idmapped lower layer. +func supportsIdmappedLowerLayers(home string) (bool, error) { + layerDir, err := ioutil.TempDir(home, "compat") + if err != nil { + return false, err + } + defer func() { + _ = os.RemoveAll(layerDir) + }() + + mergedDir := filepath.Join(layerDir, "merged") + lowerDir := filepath.Join(layerDir, "lower") + lowerMappedDir := filepath.Join(layerDir, "lower-mapped") + upperDir := filepath.Join(layerDir, "upper") + workDir := filepath.Join(layerDir, "work") + + _ = idtools.MkdirAs(mergedDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerDir, 0700, 0, 0) + _ = idtools.MkdirAs(lowerMappedDir, 0700, 0, 0) + _ = idtools.MkdirAs(upperDir, 0700, 0, 0) + _ = idtools.MkdirAs(workDir, 0700, 0, 0) + + idmap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 0, + Size: 1, + }, + } + pid, cleanupFunc, err := createUsernsProcess(idmap, idmap) + if err != nil { + return false, err + } + defer cleanupFunc() + + if err := createIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { + return false, errors.Wrapf(err, "create mapped mount") + } + defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH) + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir) + flags := uintptr(0) + if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil { + return false, err + } + defer func() { + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + }() + return true, nil +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check_115.go b/vendor/github.com/containers/storage/drivers/overlay/check_115.go deleted file mode 100644 index 9ad1b863d8d..00000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/check_115.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !go1.16 - -package overlay - -import ( - "os" - "path/filepath" - "strings" - - 
"github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/system" -) - -func scanForMountProgramIndicators(home string) (detected bool, err error) { - err = filepath.Walk(home, func(path string, info os.FileInfo, err error) error { - if detected { - return filepath.SkipDir - } - if err != nil { - return err - } - basename := filepath.Base(path) - if strings.HasPrefix(basename, archive.WhiteoutPrefix) { - detected = true - return filepath.SkipDir - } - if info.IsDir() { - xattrs, err := system.Llistxattr(path) - if err != nil { - return err - } - for _, xattr := range xattrs { - if strings.HasPrefix(xattr, "user.fuseoverlayfs.") || strings.HasPrefix(xattr, "user.containers.") { - detected = true - return filepath.SkipDir - } - } - } - return nil - }) - return detected, err -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go new file mode 100644 index 00000000000..2af33a6fce0 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go @@ -0,0 +1,160 @@ +//go:build linux +// +build linux + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type attr struct { + attrSet uint64 + attrClr uint64 + propagation uint64 + userNs uint64 +} + +const ( + // _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr + _MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint + + // _OPEN_TREE_CLONE - Clone the source path mount + _OPEN_TREE_CLONE = 0x00000001 //nolint:golint + + // _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd + _MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint +) + +// openTree is a wrapper for the open_tree syscall +func openTree(path string, flags int) (fd int, err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return 0, err + } + + r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return int(r), nil +} + +// moveMount is a wrapper for the the move_mount syscall. +func moveMount(fdTree int, target string) (err error) { + var _p0, _p1 *byte + + empty := "" + + if _p0, err = syscall.BytePtrFromString(target); err != nil { + return err + } + if _p1, err = syscall.BytePtrFromString(empty); err != nil { + return err + } + + flags := _MOVE_MOUNT_F_EMPTY_PATH + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT), + uintptr(fdTree), uintptr(unsafe.Pointer(_p1)), + 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = e1 + } + return +} + +// mountSetAttr is a wrapper for the mount_setattr syscall +func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) { + var _p0 *byte + + if _p0, err = syscall.BytePtrFromString(path); err != nil { + return err + } + + _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)), + uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) + if e1 != 0 { + err = e1 + } + return +} + +// createIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace +// for the PID process. 
+func createIDMappedMount(source, target string, pid int) error {
+	path := fmt.Sprintf("/proc/%d/ns/user", pid)
+	userNsFile, err := os.Open(path)
+	if err != nil {
+		return errors.Wrapf(err, "unable to get user ns file descriptor for %q", path)
+	}
+
+	var attr attr
+	attr.attrSet = _MOUNT_ATTR_IDMAP
+	attr.attrClr = 0
+	attr.propagation = 0
+	attr.userNs = uint64(userNsFile.Fd())
+
+	defer userNsFile.Close()
+
+	targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE)
+	if err != nil {
+		return err
+	}
+	defer unix.Close(targetDirFd)
+
+	if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
+		&attr, uint(unsafe.Sizeof(attr))); err != nil {
+		return err
+	}
+	if err := os.Mkdir(target, 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return moveMount(targetDirFd, target)
+}
+
+// createUsernsProcess forks the current process and creates a user namespace using the specified
+// mappings.  It returns the pid of the new process.
+func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int, func(), error) {
+	pid, _, err := syscall.Syscall6(uintptr(unix.SYS_CLONE), unix.CLONE_NEWUSER|uintptr(unix.SIGCHLD), 0, 0, 0, 0, 0)
+	if err != 0 {
+		return -1, nil, err
+	}
+	if pid == 0 {
+		_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
+		// just wait for the SIGKILL
+		for {
+			syscall.Pause()
+		}
+	}
+	cleanupFunc := func() {
+		unix.Kill(int(pid), unix.SIGKILL)
+		_, _ = unix.Wait4(int(pid), nil, 0, nil)
+	}
+	writeMappings := func(fname string, idmap []idtools.IDMap) error {
+		mappings := ""
+		for _, m := range idmap {
+			mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
+		}
+		return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
+	}
+	if err := writeMappings("uid_map", uidMaps); err != nil {
+		cleanupFunc()
+		return -1, nil, err
+	}
+	if err := writeMappings("gid_map", gidMaps); err != nil {
+		cleanupFunc()
+		return -1, nil, err
+	}
+
+	return int(pid), cleanupFunc, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 1efe7316d3e..8600ee68552 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 package overlay
@@ -25,7 +26,6 @@ import (
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/fsutils"
 	"github.com/containers/storage/pkg/idtools"
-	"github.com/containers/storage/pkg/locker"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
 	"github.com/containers/storage/pkg/system"
@@ -38,7 +38,6 @@ import (
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	"github.com/vbatts/tar-split/tar/storage"
 	"golang.org/x/sys/unix"
 )

@@ -119,7 +118,8 @@ type Driver struct {
 	supportsDType    bool
 	supportsVolatile *bool
 	usingMetacopy    bool
-	locker           *locker.Locker
+
+	supportsIDMappedMounts *bool
 }

 type additionalLayerStore struct {
@@ -155,6 +155,15 @@ func hasMetacopyOption(opts []string) bool {
 	return false
 }

+func stripOption(opts []string, option string) []string {
+	for i, s := range opts {
+		if s == option {
+			return stripOption(append(opts[:i], opts[i+1:]...), option)
+		}
+	}
+	return opts
+}
+
 func hasVolatileOption(opts []string) bool {
 	for _, s := range opts {
 		if s == "volatile" {
@@ -195,6 +204,30 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
 	return usingVolatile, nil
 }

+// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of an
+// idmapped lower layer.
+func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
+	if os.Geteuid() != 0 {
+		return false, nil
+	}
+
+	feature := "idmapped-lower-dir"
+	overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
+	if err == nil {
+		if overlayCacheResult {
+			logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported")
+			return true, nil
+		}
+		logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported")
+		return false, errors.New(overlayCacheText)
+	}
+	supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)
+	if err2 := cachedFeatureRecord(runhome, feature, supportsIDMappedMounts, ""); err2 != nil {
+		return false, errors.Wrap(err2, "recording overlay idmapped mounts support status")
+	}
+	return supportsIDMappedMounts, err
+}
+
 func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) {
 	var supportsDType bool

@@ -282,6 +315,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		backingFs = fsName
 	}

+	runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the driver home dir
+	if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if opts.mountProgram == "" {
+		if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
+			return nil, err
+		} else if !supported {
+			if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+				opts.mountProgram = path
+			}
+		}
+	}
+
 	if opts.mountProgram != "" {
 		if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
 			m := os.FileMode(0700)
@@ -306,20 +364,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		}
 	}

-	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create the driver home dir
-	if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
-		return nil, err
-	}
-	runhome := filepath.Join(options.RunRoot, filepath.Base(home))
-	if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
-		return nil, err
-	}
-
 	var usingMetacopy bool
 	var supportsDType bool
 	var supportsVolatile *bool
@@ -380,7 +424,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 		supportsDType:    supportsDType,
 		usingMetacopy:    usingMetacopy,
 		supportsVolatile: supportsVolatile,
-		locker:           locker.New(),
 		options:          *opts,
 	}

@@ -559,14 +602,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
 	return err
 }

-func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
-	if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
+func SupportsNativeOverlay(home, runhome string) (bool, error) {
+	if os.Geteuid() != 0 || home == "" || runhome == "" {
 		return false, nil
 	}

-	home := filepath.Join(graphroot, "overlay")
-	runhome := filepath.Join(rundir, "overlay")
-
 	var contents string
 	flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
 	if err == nil {
@@ -881,11 +921,18 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
 	if err != nil {
 		return err
 	}
+
+	idPair := idtools.IDPair{
+		UID: rootUID,
+		GID: rootGID,
+	}
+
 	// Make the link directory if it does not exist
-	if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil {
 		return err
 	}
-	if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
+
+	if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil {
 		return err
 	}
 	if parent != "" {
@@ -896,14 +943,26 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
 		rootUID = int(st.UID())
 		rootGID = int(st.GID())
 	}
-	if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
+
+	if _, err := system.Lstat(dir); err == nil {
+		logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
+		// Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
+		// so that we can’t end up with two symlinks in linkDir pointing to the same layer.
+		if err := d.Remove(id); err != nil {
+			return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir)
+		}
+	}
+
+	if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil {
 		return err
 	}

 	defer func() {
 		// Clean up on failure
 		if retErr != nil {
-			os.RemoveAll(dir)
+			if err2 := os.RemoveAll(dir); err2 != nil {
+				logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
+			}
 		}
 	}()

@@ -1039,17 +1098,22 @@ func (d *Driver) getLower(parent string) (string, error) {
 }

 func (d *Driver) dir(id string) string {
+	p, _ := d.dir2(id)
+	return p
+}
+
+func (d *Driver) dir2(id string) (string, bool) {
 	newpath := path.Join(d.home, id)
 	if _, err := os.Stat(newpath); err != nil {
 		for _, p := range d.AdditionalImageStores() {
 			l := path.Join(p, d.name, id)
 			_, err = os.Stat(l)
 			if err == nil {
-				return l
+				return l, true
 			}
 		}
 	}
-	return newpath
+	return newpath, false
 }

 func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -1122,9 +1186,6 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa

 // Remove cleans the directories that are created for this id.
 func (d *Driver) Remove(id string) error {
-	d.locker.Lock(id)
-	defer d.locker.Unlock(id)
-
 	dir := d.dir(id)
 	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
 	if err == nil {
@@ -1145,6 +1206,9 @@ func (d *Driver) Remove(id string) error {
 // under each layer has a symlink created for it under the linkDir. If the symlink does not
 // exist, it creates them
 func (d *Driver) recreateSymlinks() error {
+	// We have at most 3 corrective actions per layer, so 10 iterations is plenty.
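+	// The loop below is a bounded fixpoint iteration: it keeps re-scanning the
+	// layers as long as at least one corrective action succeeded, and gives up
+	// after maxIterations so that two broken layers undoing each other's fixes
+	// cannot loop forever. A minimal sketch of the pattern (names illustrative
+	// only, not part of this driver):
+	//
+	//	for madeProgress && iterations < maxIterations {
+	//		madeProgress = false
+	//		for _, item := range scan() {
+	//			if fix(item) {
+	//				madeProgress = true
+	//			}
+	//		}
+	//		iterations++
+	//	}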
+	const maxIterations = 10
+
 	// List all the directories under the home directory
 	dirs, err := ioutil.ReadDir(d.home)
 	if err != nil {
@@ -1162,6 +1226,7 @@ func (d *Driver) recreateSymlinks() error {
 	// Keep looping as long as we take some corrective action in each iteration
 	var errs *multierror.Error
 	madeProgress := true
+	iterations := 0
 	for madeProgress {
 		errs = nil
 		madeProgress = false
@@ -1175,7 +1240,7 @@ func (d *Driver) recreateSymlinks() error {
 			// Read the "link" file under each layer to get the name of the symlink
 			data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
 			if err != nil {
-				errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir))
+				errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name()))
 				continue
 			}
 			linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n"))
@@ -1212,7 +1277,12 @@ func (d *Driver) recreateSymlinks() error {
 			if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" {
 				errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target))
 				// force the link to be recreated on the next pass
-				os.Remove(filepath.Join(linksDir, link.Name()))
+				if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil {
+					if !os.IsNotExist(err) {
+						errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link))
+					} // else don’t report any error, but also don’t set madeProgress.
+					continue
+				}
 				madeProgress = true
 				continue
 			}
@@ -1222,6 +1292,8 @@ func (d *Driver) recreateSymlinks() error {
 			linkFile := filepath.Join(d.dir(targetID), "link")
 			data, err := ioutil.ReadFile(linkFile)
 			if err != nil || string(data) != link.Name() {
+				// NOTE: If two or more links point to the same target, we will update linkFile
+				// with every value of link.Name(), and set madeProgress = true every time.
 				if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
 					errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID))
 					continue
@@ -1229,6 +1301,11 @@ func (d *Driver) recreateSymlinks() error {
 				madeProgress = true
 			}
 		}
+		iterations++
+		if iterations >= maxIterations {
+			errs = multierror.Append(errs, fmt.Errorf("reached %d iterations in overlay graph driver’s recreateSymlinks, giving up", iterations))
+			break
+		}
 	}
 	if errs != nil {
 		return errs.ErrorOrNil()
@@ -1242,18 +1319,20 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
 }

 func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
-	d.locker.Lock(id)
-	defer d.locker.Unlock(id)
-	dir := d.dir(id)
+	dir, inAdditionalStore := d.dir2(id)
 	if _, err := os.Stat(dir); err != nil {
 		return "", err
 	}
-	readWrite := true
+	readWrite := !inAdditionalStore

 	if !d.SupportsShifting() || options.DisableShifting {
 		disableShifting = true
 	}

+	logLevel := logrus.WarnLevel
+	if unshare.IsRootless() {
+		logLevel = logrus.DebugLevel
+	}
 	optsList := options.Options
 	if len(optsList) == 0 {
 		optsList = strings.Split(d.options.mountOptions, ",")
@@ -1262,16 +1341,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		// options otherwise the kernel refuses to follow the metacopy xattr.
 		if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) {
 			if d.usingMetacopy {
+				logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally")
 				optsList = append(optsList, "metacopy=on")
-			} else {
-				logLevel := logrus.WarnLevel
-				if unshare.IsRootless() {
-					logLevel = logrus.DebugLevel
-				}
-				logrus.StandardLogger().Logf(logLevel, "Ignoring metacopy option from storage.conf, not supported with booted kernel")
 			}
 		}
 	}
+	if !d.usingMetacopy {
+		if hasMetacopyOption(optsList) {
+			logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel")
+		}
+		optsList = stripOption(optsList, "metacopy=on")
+	}
+
 	for _, o := range optsList {
 		if o == "ro" {
 			readWrite = false
@@ -1322,7 +1403,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	}
 	for err == nil {
 		absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
-		relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
+		relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN)))
 		diffN++
 		st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
 		if err == nil && !permsKnown {
@@ -1416,41 +1497,82 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO

 	workdir := path.Join(dir, "work")

-	var opts string
-	if readWrite {
-		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
-	} else {
-		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
-	}
-	if len(optsList) > 0 {
-		opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts)
-	}
-
 	if d.options.mountProgram == "" && unshare.IsRootless() {
-		opts = fmt.Sprintf("%s,userxattr", opts)
+		optsList = append(optsList, "userxattr")
 	}

-	// If "volatile" is not supported by the file system, just ignore the request
-	if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) {
+	if options.Volatile && !hasVolatileOption(optsList) {
 		supported, err := d.getSupportsVolatile()
 		if err != nil {
 			return "", err
 		}
+		// If "volatile" is not supported by the file system, just ignore the request
 		if supported {
-			opts = fmt.Sprintf("%s,volatile", opts)
+			optsList = append(optsList, "volatile")
 		}
 	}

+	if d.supportsIDmappedMounts() && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 {
+		var newAbsDir []string
+		mappedRoot := filepath.Join(d.home, id, "mapped")
+		if err := os.MkdirAll(mappedRoot, 0700); err != nil {
+			return "", err
+		}
+
+		pid, cleanupFunc, err := createUsernsProcess(options.UidMaps, options.GidMaps)
+		if err != nil {
+			return "", err
+		}
+		defer cleanupFunc()
+
+		idMappedMounts := make(map[string]string)
+
+		// rewrite the lower dirs to their idmapped mount.
+		c := 0
+		for _, absLower := range absLowers {
+			mappedMountSrc := getMappedMountRoot(absLower)
+
+			root, found := idMappedMounts[mappedMountSrc]
+			if !found {
+				root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
+				c++
+				if err := createIDMappedMount(mappedMountSrc, root, int(pid)); err != nil {
+					return "", errors.Wrapf(err, "create mapped mount for %q on %q", mappedMountSrc, root)
+				}
+				idMappedMounts[mappedMountSrc] = root
+
+				// overlay takes a reference on the mount, so it is safe to unmount
+				// the mapped idmounts as soon as the final overlay file system is mounted.
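+				// The resulting call sequence is, schematically (a sketch with
+				// error handling elided, not additional driver code):
+				//
+				//	createIDMappedMount(src, root, pid)               // detached clone + MOUNT_ATTR_IDMAP
+				//	defer unix.Unmount(root, unix.MNT_DETACH)         // drop our mount-table entry
+				//	unix.Mount("overlay", merged, "overlay", 0, opts) // overlay pins the tree
+				//
+				// MNT_DETACH only removes the mount-table entry; the kernel keeps
+				// the idmapped tree alive while the overlay mount references it.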
+				defer unix.Unmount(root, unix.MNT_DETACH)
+			}
+
+			// relative path to the layer through the id mapped mount
+			rel, err := filepath.Rel(mappedMountSrc, absLower)
+			if err != nil {
+				return "", err
+			}
+
+			newAbsDir = append(newAbsDir, filepath.Join(root, rel))
+		}
+		absLowers = newAbsDir
+	}
+
+	var opts string
+	if readWrite {
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
+	} else {
+		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
+	}
+	if len(optsList) > 0 {
+		opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
+	}
+
 	mountData := label.FormatMountLabel(opts, options.MountLabel)
 	mountFunc := unix.Mount
 	mountTarget := mergedDir

 	pageSize := unix.Getpagesize()

-	// Use relative paths and mountFrom when the mount data has exceeded
-	// the page size. The mount syscall fails if the mount data cannot
-	// fit within a page and relative links make the mount data much
-	// smaller at the expense of requiring a fork exec to chroot.
 	if d.options.mountProgram != "" {
 		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
 			if !disableShifting {
@@ -1476,18 +1598,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			}
 			return nil
 		}
-	} else if len(mountData) > pageSize {
+	} else if len(mountData) >= pageSize {
+		// Use relative paths and mountFrom when the mount data has exceeded
+		// the page size. The mount syscall fails if the mount data cannot
+		// fit within a page and relative links make the mount data much
+		// smaller at the expense of requiring a fork exec to chroot.
+
+		workdir = path.Join(id, "work")
 		//FIXME: We need to figure out to get this to work with additional stores
 		if readWrite {
 			diffDir := path.Join(id, "diff")
 			opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
 		} else {
-			opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":"))
+			opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":"))
+		}
+		if len(optsList) > 0 {
+			opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
 		}
 		mountData = label.FormatMountLabel(opts, options.MountLabel)
-		if len(mountData) > pageSize {
-			return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize)
+		if len(mountData) >= pageSize {
+			return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize)
 		}
 		mountFunc = func(source string, target string, mType string, flags uintptr, label string) error {
 			return mountFrom(d.home, source, target, mType, flags, label)
@@ -1513,8 +1643,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO

 // Put unmounts the mount path created for the given id.
 func (d *Driver) Put(id string) error {
-	d.locker.Lock(id)
-	defer d.locker.Unlock(id)
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
 		return err
@@ -1529,6 +1657,18 @@ func (d *Driver) Put(id string) error {

 	unmounted := false

+	mappedRoot := filepath.Join(d.home, id, "mapped")
+	// It should not happen, but clean up any mapped mount if it was leaked.
+	if _, err := os.Stat(mappedRoot); err == nil {
+		mounts, err := ioutil.ReadDir(mappedRoot)
+		if err == nil {
+			// Go through all of the mapped mounts.
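+			// Every entry under "mapped/" was created by createIDMappedMount
+			// with a numeric name ("0", "1", ...), so one detached unmount per
+			// directory entry suffices, e.g. (sketch):
+			//
+			//	_ = unix.Unmount(filepath.Join(mappedRoot, "0"), unix.MNT_DETACH)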
+			for _, m := range mounts {
+				_ = unix.Unmount(filepath.Join(mappedRoot, m.Name()), unix.MNT_DETACH)
+			}
+		}
+	}
+
 	if d.options.mountProgram != "" {
 		// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
 		// If they fail, fall back to unix.Unmount
@@ -1606,11 +1746,24 @@ func (d *Driver) getWhiteoutFormat() archive.WhiteoutFormat {
 	return whiteoutFormat
 }

-type fileGetNilCloser struct {
-	storage.FileGetter
+type overlayFileGetter struct {
+	diffDirs []string
+}
+
+func (g *overlayFileGetter) Get(path string) (io.ReadCloser, error) {
+	for _, d := range g.diffDirs {
+		f, err := os.Open(filepath.Join(d, path))
+		if err == nil {
+			return f, nil
+		}
+	}
+	if len(g.diffDirs) > 0 {
+		return os.Open(filepath.Join(g.diffDirs[0], path))
+	}
+	return nil, fmt.Errorf("%s: %w", path, os.ErrNotExist)
 }

-func (f fileGetNilCloser) Close() error {
+func (g *overlayFileGetter) Close() error {
 	return nil
 }

@@ -1619,13 +1772,18 @@ func (d *Driver) getStagingDir() string {
 }

 // DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
+// contains files for the layer differences, either for this layer, or one of our
+// lowers if we're just a template directory. Used for direct access for tar-split.
 func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 	p, err := d.getDiffPath(id)
 	if err != nil {
 		return nil, err
 	}
-	return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
+	paths, err := d.getLowerDiffPaths(id)
+	if err != nil {
+		return nil, err
+	}
+	return &overlayFileGetter{diffDirs: append([]string{p}, paths...)}, nil
 }

 // CleanupStagingDirectory cleanups the staging directory.
@@ -1900,12 +2058,31 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
 	return nil
 }

+// supportsIDmappedMounts returns whether the kernel supports using idmapped mounts with
+// overlay lower layers.
+func (d *Driver) supportsIDmappedMounts() bool {
+	if d.supportsIDMappedMounts != nil {
+		return *d.supportsIDMappedMounts
+	}
+
+	supportsIDMappedMounts, err := checkAndRecordIDMappedSupport(d.home, d.runhome)
+	d.supportsIDMappedMounts = &supportsIDMappedMounts
+	if err == nil {
+		return supportsIDMappedMounts
+	}
+	logrus.Debugf("Check for idmapped mounts support: %v", err)
+	return false
+}
+
 // SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
 func (d *Driver) SupportsShifting() bool {
 	if os.Getenv("_TEST_FORCE_SUPPORT_SHIFTING") == "yes-please" {
 		return true
 	}
-	return d.options.mountProgram != ""
+	if d.options.mountProgram != "" {
+		return true
+	}
+	return d.supportsIDmappedMounts()
 }

 // dumbJoin is more or less a dumber version of filepath.Join, but one which
@@ -2074,3 +2251,15 @@ func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
 	}
 	return diffPath, nil
 }
+
+// getMappedMountRoot is a heuristic that calculates the parent directory where
+// the idmapped mount should be applied.
+// It is useful to minimize the number of idmapped mounts and at the same time use
+// a common path as long as possible to reduce the length of the mount data argument.
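+// For example, assuming the driver home is "/var/lib/containers/storage/overlay"
+// and linkDir is its "l" subdirectory (paths hypothetical):
+//
+//	getMappedMountRoot("/var/lib/containers/storage/overlay/l/ABCDEF")     == "/var/lib/containers/storage/overlay"
+//	getMappedMountRoot("/var/lib/containers/storage/overlay/LAYER/diff")   == "/var/lib/containers/storage/overlay/LAYER"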
+func getMappedMountRoot(path string) string {
+	dirName := filepath.Dir(path)
+	if filepath.Base(dirName) == linkDir {
+		return filepath.Dir(dirName)
+	}
+	return dirName
+}
diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
index c748468e5cb..4623e7f4648 100644
--- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go
+++ b/vendor/github.com/containers/storage/drivers/register/register_zfs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris

 package register

diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index e034bf152c9..f29dc8f8556 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -344,7 +344,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) error {
 		return errors.Wrap(err, "error creating zfs mount")
 	}
 	defer func() {
-		if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+		if err := detachUnmount(mountpoint); err != nil {
 			logrus.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
 		}
 	}()
@@ -483,7 +483,7 @@ func (d *Driver) Put(id string) error {

 	logger.Debugf(`unmount("%s")`, mountpoint)

-	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+	if err := detachUnmount(mountpoint); err != nil {
 		logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
 	}
 	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
index bf690515984..61a2ed871a4 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -2,7 +2,6 @@ package zfs

 import (
 	"fmt"
-	"strings"

 	"github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
@@ -26,14 +25,10 @@ func checkRootdirFs(rootdir string) error {
 }

 func getMountpoint(id string) string {
-	maxlen := 12
-
-	// we need to preserve filesystem suffix
-	suffix := strings.SplitN(id, "-", 2)
-
-	if len(suffix) > 1 {
-		return id[:maxlen] + "-" + suffix[1]
-	}
+	return id
+}

-	return id[:maxlen]
+func detachUnmount(mountpoint string) error {
+	// FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH
+	return unix.Unmount(mountpoint, unix.MNT_FORCE)
 }
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
index edcb1da36b7..44c68f394ec 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -4,6 +4,7 @@ import (
 	graphdriver "github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )

 func checkRootdirFs(rootDir string) error {
@@ -27,3 +28,7 @@ func checkRootdirFs(rootDir string) error {
 func getMountpoint(id string) string {
 	return id
 }
+
+func detachUnmount(mountpoint string) error {
+	return unix.Unmount(mountpoint, unix.MNT_DETACH)
+}
diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go
index 5fc810b89d4..de6e377541c 100644
--- a/vendor/github.com/containers/storage/errors.go
+++ b/vendor/github.com/containers/storage/errors.go
@@ -1,6 +1,8 @@
 package storage

 import (
+	"errors"
+
 	"github.com/containers/storage/types"
 )

@@ -55,4 +57,9 @@ var (
 	ErrStoreIsReadOnly = types.ErrStoreIsReadOnly
 	// ErrNotSupported is returned when the requested functionality is not supported.
 	ErrNotSupported = types.ErrNotSupported
+	// ErrInvalidMappings is returned when the specified mappings are invalid.
+	ErrInvalidMappings = types.ErrInvalidMappings
+	// errInvalidUpdateNameOperation is returned when updateName is called with an invalid operation.
+	// Internal error
+	errInvalidUpdateNameOperation = errors.New("invalid update name operation")
 )
diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go
index f870b9ceed3..0a06a43235f 100644
--- a/vendor/github.com/containers/storage/idset.go
+++ b/vendor/github.com/containers/storage/idset.go
@@ -1,6 +1,9 @@
 package storage

 import (
+	"fmt"
+	"strings"
+
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/google/go-intervals/intervalset"
 	"github.com/pkg/errors"
@@ -218,3 +221,45 @@ func maxInt(a, b int) int {
 	}
 	return a
 }
+
+func hasOverlappingRanges(mappings []idtools.IDMap) error {
+	hostIntervals := intervalset.Empty()
+	containerIntervals := intervalset.Empty()
+
+	var conflicts []string
+
+	for _, m := range mappings {
+		c := interval{start: m.ContainerID, end: m.ContainerID + m.Size}
+		h := interval{start: m.HostID, end: m.HostID + m.Size}
+
+		added := false
+		overlaps := false
+
+		containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool {
+			overlaps = true
+			return false
+		})
+		if overlaps {
+			conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
+			added = true
+		}
+		containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c}))
+
+		hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool {
+			overlaps = true
+			return false
+		})
+		if overlaps && !added {
+			conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size))
+		}
+		hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h}))
+	}
+
+	if conflicts != nil {
+		if len(conflicts) == 1 {
+			return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0])
+		}
+		return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", "))
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index bca25a65b8c..a4c3ed22c75 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -136,8 +136,19 @@ type ImageStore interface {
 	// SetNames replaces the list of names associated with an image with the
 	// supplied values. The values are expected to be valid normalized
 	// named image references.
+	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
 	SetNames(id string, names []string) error

+	// AddNames adds the supplied values to the list of names associated with the image with
+	// the specified id. The values are expected to be valid normalized
+	// named image references.
+	AddNames(id string, names []string) error
+
+	// RemoveNames removes the supplied values from the list of names associated with the image with
+	// the specified id. The values are expected to be valid normalized
+	// named image references.
+	RemoveNames(id string, names []string) error
+
 	// Delete removes the record of the image.
 	Delete(id string) error

@@ -425,37 +436,36 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
 	if created.IsZero() {
 		created = time.Now().UTC()
 	}
-	if err == nil {
-		image = &Image{
-			ID:             id,
-			Digest:         searchableDigest,
-			Digests:        nil,
-			Names:          names,
-			TopLayer:       layer,
-			Metadata:       metadata,
-			BigDataNames:   []string{},
-			BigDataSizes:   make(map[string]int64),
-			BigDataDigests: make(map[string]digest.Digest),
-			Created:        created,
-			Flags:          make(map[string]interface{}),
-		}
-		err := image.recomputeDigests()
-		if err != nil {
-			return nil, errors.Wrapf(err, "error validating digests for new image")
-		}
-		r.images = append(r.images, image)
-		r.idindex.Add(id)
-		r.byid[id] = image
-		for _, name := range names {
-			r.byname[name] = image
-		}
-		for _, digest := range image.Digests {
-			list := r.bydigest[digest]
-			r.bydigest[digest] = append(list, image)
-		}
-		err = r.Save()
-		image = copyImage(image)
+
+	image = &Image{
+		ID:             id,
+		Digest:         searchableDigest,
+		Digests:        nil,
+		Names:          names,
+		TopLayer:       layer,
+		Metadata:       metadata,
+		BigDataNames:   []string{},
+		BigDataSizes:   make(map[string]int64),
+		BigDataDigests: make(map[string]digest.Digest),
+		Created:        created,
+		Flags:          make(map[string]interface{}),
+	}
+	err = image.recomputeDigests()
+	if err != nil {
+		return nil, errors.Wrapf(err, "error validating digests for new image")
+	}
+	r.images = append(r.images, image)
+	r.idindex.Add(id)
+	r.byid[id] = image
+	for _, name := range names {
+		r.byname[name] = image
+	}
+	for _, digest := range image.Digests {
+		list := r.bydigest[digest]
+		r.bydigest[digest] = append(list, image)
 	}
+	err = r.Save()
+	image = copyImage(image)
 	return image, err
 }

@@ -506,26 +516,44 @@ func (i *Image) addNameToHistory(name string) {
 	i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...))
 }

+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
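+// A read-modify-write sequence such as (hypothetical caller code)
+//
+//	names := append(img.Names, "docker.io/library/foo:latest")
+//	store.SetNames(img.ID, names)
+//
+// can lose a concurrent update between the read and the write, while
+//
+//	store.AddNames(img.ID, []string{"docker.io/library/foo:latest"})
+//
+// expresses the whole change as a single operation.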
 func (r *imageStore) SetNames(id string, names []string) error {
+	return r.updateNames(id, names, setNames)
+}
+
+func (r *imageStore) AddNames(id string, names []string) error {
+	return r.updateNames(id, names, addNames)
+}
+
+func (r *imageStore) RemoveNames(id string, names []string) error {
+	return r.updateNames(id, names, removeNames)
+}
+
+func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath())
 	}
-	names = dedupeNames(names)
-	if image, ok := r.lookup(id); ok {
-		for _, name := range image.Names {
-			delete(r.byname, name)
-		}
-		for _, name := range names {
-			if otherImage, ok := r.byname[name]; ok {
-				r.removeName(otherImage, name)
-			}
-			r.byname[name] = image
-			image.addNameToHistory(name)
+	image, ok := r.lookup(id)
+	if !ok {
+		return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	}
+	oldNames := image.Names
+	names, err := applyNameOperation(oldNames, names, op)
+	if err != nil {
+		return err
+	}
+	for _, name := range oldNames {
+		delete(r.byname, name)
+	}
+	for _, name := range names {
+		if otherImage, ok := r.byname[name]; ok {
+			r.removeName(otherImage, name)
 		}
-		image.Names = names
-		return r.Save()
+		r.byname[name] = image
+		image.addNameToHistory(name)
 	}
-	return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
+	image.Names = names
+	return r.Save()
 }

 func (r *imageStore) Delete(id string) error {
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index fbf6ad3621f..bba8d7588f7 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -23,6 +23,7 @@ import (
 	"github.com/containers/storage/pkg/system"
 	"github.com/containers/storage/pkg/tarlog"
 	"github.com/containers/storage/pkg/truncindex"
+	multierror "github.com/hashicorp/go-multierror"
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/opencontainers/selinux/go-selinux/label"
@@ -220,8 +221,17 @@ type LayerStore interface {

 	// SetNames replaces the list of names associated with a layer with the
 	// supplied values.
+	// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
 	SetNames(id string, names []string) error

+	// AddNames adds the supplied values to the list of names associated with the layer with the
+	// specified id.
+	AddNames(id string, names []string) error
+
+	// RemoveNames removes the supplied values from the list of names associated with the layer with the
+	// specified id.
+	RemoveNames(id string, names []string) error
+
 	// Delete deletes a layer with the specified name or ID.
 	Delete(id string) error

@@ -398,14 +408,13 @@ func (r *layerStore) Load() error {
 			if layer.Flags == nil {
 				layer.Flags = make(map[string]interface{})
 			}
-			if cleanup, ok := layer.Flags[incompleteFlag]; ok {
-				if b, ok := cleanup.(bool); ok && b {
-					err = r.deleteInternal(layer.ID)
-					if err != nil {
-						break
-					}
-					shouldSave = true
+			if layerHasIncompleteFlag(layer) {
+				logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID)
+				err = r.deleteInternal(layer.ID)
+				if err != nil {
+					break
 				}
+				shouldSave = true
 			}
 		}
 	}
@@ -674,7 +683,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
 		r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
 	}
 	if layer.UncompressedDigest != "" {
-		r.byuncompressedsum[layer.CompressedDigest] = append(r.byuncompressedsum[layer.CompressedDigest], layer.ID)
+		r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
 	}
 	if err := r.Save(); err != nil {
 		r.driver.Remove(id)
@@ -683,11 +692,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
 	return copyLayer(layer), nil
 }

-func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
+func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
 	if !r.IsReadWrite() {
 		return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
 	}
-	size = -1
 	if err := os.MkdirAll(r.rundir, 0700); err != nil {
 		return nil, -1, err
 	}
@@ -716,12 +724,32 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		parent = parentLayer.ID
 	}
 	var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
+	var (
+		templateMetadata           string
+		templateCompressedDigest   digest.Digest
+		templateCompressedSize     int64
+		templateUncompressedDigest digest.Digest
+		templateUncompressedSize   int64
+		templateCompressionType    archive.Compression
+		templateUIDs, templateGIDs []uint32
+		templateTSdata             []byte
+	)
 	if moreOptions.TemplateLayer != "" {
+		var tserr error
 		templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
 		if !ok {
 			return nil, -1, ErrLayerUnknown
 		}
+		templateMetadata = templateLayer.Metadata
 		templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
+		templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
+		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
+		templateCompressionType = templateLayer.CompressionType
+		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
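+		// Note the append-to-empty-slice copies above: they keep the new
+		// layer's UIDs/GIDs from aliasing the template's backing arrays,
+		// the usual Go deep-copy idiom for slices (sketch):
+		//
+		//	uids := append([]uint32{}, template.UIDs...) // independent copy
+		//
+		// The template's tar-split data, if present, is copied as raw bytes
+		// below; a missing tar-split file simply means the template layer
+		// was created without a diff, which is not an error.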
+		templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
+		if tserr != nil && !os.IsNotExist(tserr) {
+			return nil, -1, tserr
+		}
 	} else {
 		templateIDMappings = &idtools.IDMappings{}
 	}
@@ -733,6 +761,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 	if mountLabel != "" {
 		label.ReserveLabel(mountLabel)
 	}
+
+	// Before actually creating the layer, make a persistent record of it with incompleteFlag,
+	// so that future processes have a chance to delete it.
+	layer := &Layer{
+		ID:                 id,
+		Parent:             parent,
+		Names:              names,
+		MountLabel:         mountLabel,
+		Metadata:           templateMetadata,
+		Created:            time.Now().UTC(),
+		CompressedDigest:   templateCompressedDigest,
+		CompressedSize:     templateCompressedSize,
+		UncompressedDigest: templateUncompressedDigest,
+		UncompressedSize:   templateUncompressedSize,
+		CompressionType:    templateCompressionType,
+		UIDs:               templateUIDs,
+		GIDs:               templateGIDs,
+		Flags:              make(map[string]interface{}),
+		UIDMap:             copyIDMap(moreOptions.UIDMap),
+		GIDMap:             copyIDMap(moreOptions.GIDMap),
+		BigDataNames:       []string{},
+	}
+	r.layers = append(r.layers, layer)
+	r.idindex.Add(id)
+	r.byid[id] = layer
+	for _, name := range names {
+		r.byname[name] = layer
+	}
+	for flag, value := range flags {
+		layer.Flags[flag] = value
+	}
+	layer.Flags[incompleteFlag] = true
+
+	succeeded := false
+	cleanupFailureContext := ""
+	defer func() {
+		if !succeeded {
+			// On any error, try both removing the driver's data as well
+			// as the in-memory layer record.
+			if err2 := r.Delete(layer.ID); err2 != nil {
+				if cleanupFailureContext == "" {
+					cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
+				}
+				logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
+			}
+		}
+	}()
+
+	err := r.Save()
+	if err != nil {
+		cleanupFailureContext = "saving incomplete layer metadata"
+		return nil, -1, err
+	}
+
 	idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
 	opts := drivers.CreateOpts{
 		MountLabel: mountLabel,
@@ -740,89 +822,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		IDMappings: idMappings,
 	}
 	if moreOptions.TemplateLayer != "" {
-		if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
-			if id != "" {
-				return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
-			}
-			return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer)
+		if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
+			cleanupFailureContext = "creating a layer from template"
+			return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
 		}
 		oldMappings = templateIDMappings
 	} else {
 		if writeable {
-			if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
-				if id != "" {
-					return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
-				}
-				return nil, -1, errors.Wrapf(err, "error creating read-write layer")
+			if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
+				cleanupFailureContext = "creating a read-write layer"
+				return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
 			}
 		} else {
-			if err = r.driver.Create(id, parent, &opts); err != nil {
-				if id != "" {
-					return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
-				}
-				return nil, -1, errors.Wrapf(err, "error creating layer")
+			if err := r.driver.Create(id, parent, &opts); err != nil {
+				cleanupFailureContext = "creating a read-only layer"
+				return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
 			}
 		}
 		oldMappings = parentMappings
 	}
 	if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
-		if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
-			// We don't have a record of this layer, but at least
-			// try to clean it up underneath us.
-			r.driver.Remove(id)
+		if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
+			cleanupFailureContext = "in UpdateLayerIDMap"
 			return nil, -1, err
 		}
 	}
-	if err == nil {
-		layer = &Layer{
-			ID:           id,
-			Parent:       parent,
-			Names:        names,
-			MountLabel:   mountLabel,
-			Created:      time.Now().UTC(),
-			Flags:        make(map[string]interface{}),
-			UIDMap:       copyIDMap(moreOptions.UIDMap),
-			GIDMap:       copyIDMap(moreOptions.GIDMap),
-			BigDataNames: []string{},
-		}
-		r.layers = append(r.layers, layer)
-		r.idindex.Add(id)
-		r.byid[id] = layer
-		for _, name := range names {
-			r.byname[name] = layer
-		}
-		for flag, value := range flags {
-			layer.Flags[flag] = value
-		}
-		if diff != nil {
-			layer.Flags[incompleteFlag] = true
-			err = r.Save()
-			if err != nil {
-				// We don't have a record of this layer, but at least
-				// try to clean it up underneath us.
-				r.driver.Remove(id)
-				return nil, -1, err
-			}
-			size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
-			if err != nil {
-				if r.Delete(layer.ID) != nil {
-					// Either a driver error or an error saving.
-					// We now have a layer that's been marked for
-					// deletion but which we failed to remove.
-				}
-				return nil, -1, err
-			}
-			delete(layer.Flags, incompleteFlag)
+	if len(templateTSdata) > 0 {
+		if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
+			cleanupFailureContext = "creating tar-split parent directory for a copy from template"
+			return nil, -1, err
+		}
+		if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
+			cleanupFailureContext = "creating a tar-split copy from template"
+			return nil, -1, err
 		}
-		err = r.Save()
+	}
+
+	var size int64 = -1
+	if diff != nil {
+		size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
 		if err != nil {
-			// We don't have a record of this layer, but at least
-			// try to clean it up underneath us.
-			r.driver.Remove(id)
+			cleanupFailureContext = "applying layer diff"
 			return nil, -1, err
 		}
-		layer = copyLayer(layer)
+	} else {
+		// applyDiffWithOptions in the `diff != nil` case handles this bit for us
+		if layer.CompressedDigest != "" {
+			r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
+		}
+		if layer.UncompressedDigest != "" {
+			r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
+		}
 	}
+	delete(layer.Flags, incompleteFlag)
+	err = r.Save()
+	if err != nil {
+		cleanupFailureContext = "saving finished layer metadata"
+		return nil, -1, err
+	}
+
+	layer = copyLayer(layer)
+	succeeded = true
 	return layer, size, err
 }

@@ -854,7 +914,6 @@ func (r *layerStore) Mounted(id string) (int, error) {
 }

 func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error) {
-
 	// check whether options include ro option
 	hasReadOnlyOpt := func(opts []string) bool {
 		for _, item := range opts {
@@ -1031,25 +1090,43 @@ func (r *layerStore) removeName(layer *Layer, name string) {
 	layer.Names = stringSliceWithoutValue(layer.Names, name)
 }

+// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
 func (r *layerStore) SetNames(id string, names []string) error {
+	return r.updateNames(id, names, setNames)
+}
+
+func (r *layerStore) AddNames(id string, names []string) error {
+	return r.updateNames(id, names, addNames)
+}
+
+func (r *layerStore) RemoveNames(id string, names []string) error {
+	return r.updateNames(id, names, removeNames)
+}
+
+func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error {
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
 	}
-	names = dedupeNames(names)
-	if layer, ok := r.lookup(id); ok {
-		for _, name := range layer.Names {
-			delete(r.byname, name)
-		}
-		for _, name := range names {
-			if otherLayer, ok := r.byname[name]; ok {
-				r.removeName(otherLayer, name)
-			}
-			r.byname[name] = layer
+	layer, ok := r.lookup(id)
+	if !ok {
+		return ErrLayerUnknown
+	}
+	oldNames := layer.Names
+	names, err := applyNameOperation(oldNames, names, op)
+	if err != nil {
+		return err
+	}
+	for _, name := range oldNames {
+		delete(r.byname, name)
+	}
+	for _, name := range names {
+		if otherLayer, ok := r.byname[name]; ok {
+			r.removeName(otherLayer, name)
 		}
-		layer.Names = names
-		return r.Save()
+		r.byname[name] = layer
 	}
-	return ErrLayerUnknown
+	layer.Names = names
+	return r.Save()
 }

 func (r *layerStore) datadir(id string) string {
@@ -1148,6 +1225,17 @@ func (r *layerStore) tspath(id string) string {
 	return filepath.Join(r.layerdir, id+tarSplitSuffix)
 }

+// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
+func layerHasIncompleteFlag(layer *Layer) bool {
+	// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
+	if flagValue, ok := layer.Flags[incompleteFlag]; ok {
+		if b, ok := flagValue.(bool); ok && b {
+			return true
+		}
+	}
+	return false
+}
+
 func (r *layerStore) deleteInternal(id string) error {
 	if !r.IsReadWrite() {
 		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
@@ -1156,6 +1244,18 @@ func (r *layerStore) deleteInternal(id string) error {
 	if !ok {
 		return ErrLayerUnknown
 	}
+	// Ensure that if we are interrupted, the layer will be cleaned up.
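+	// This is a write-ahead pattern: persist the intent first, then do the
+	// destructive work, and finally drop the record (here the whole record is
+	// removed rather than the flag being unset). Schematically (sketch, not
+	// additional store code):
+	//
+	//	layer.Flags[incompleteFlag] = true
+	//	r.Save()            // 1. record the intent on disk
+	//	r.driver.Remove(id) // 2. destructive step; safe to crash here
+	//	                    // 3. delete the whole layer record
+	//
+	// A crash after step 1 is handled by Load(), which deletes any layer for
+	// which layerHasIncompleteFlag reports true.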
+	if !layerHasIncompleteFlag(layer) {
+		if layer.Flags == nil {
+			layer.Flags = make(map[string]interface{})
+		}
+		layer.Flags[incompleteFlag] = true
+		if err := r.Save(); err != nil {
+			return err
+		}
+	}
+	// We never unset incompleteFlag; below, we remove the entire object from r.layers.
+
 	id = layer.ID
 	err := r.driver.Remove(id)
 	if err != nil {
@@ -1463,34 +1563,48 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
 		}
 		return maybeCompressReadCloser(diff)
 	}
-	defer tsfile.Close()

 	decompressor, err := pgzip.NewReader(tsfile)
 	if err != nil {
-		return nil, err
-	}
-	defer decompressor.Close()
-
-	tsbytes, err := ioutil.ReadAll(decompressor)
-	if err != nil {
+		if e := tsfile.Close(); e != nil {
+			logrus.Debug(e)
+		}
 		return nil, err
 	}

-	metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes))
+	metadata = storage.NewJSONUnpacker(decompressor)

 	fgetter, err := r.newFileGetter(to)
 	if err != nil {
-		return nil, err
+		errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter"))
+		if err := decompressor.Close(); err != nil {
+			errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor"))
+		}
+		if err := tsfile.Close(); err != nil {
+			errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers"))
+		}
+		return nil, errs.ErrorOrNil()
 	}

 	tarstream := asm.NewOutputTarStream(fgetter, metadata)
 	rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
-		err1 := tarstream.Close()
-		err2 := fgetter.Close()
-		if err2 == nil {
-			return err1
+		var errs *multierror.Error
+		if err := decompressor.Close(); err != nil {
+			errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor"))
+		}
+		if err := tsfile.Close(); err != nil {
+			errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers"))
+		}
+		if err := tarstream.Close(); err != nil {
+			errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream"))
+		}
+		if err := fgetter.Close(); err != nil {
+			errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter"))
+		}
+		if errs != nil {
+			return errs.ErrorOrNil()
 		}
-		return err2
+		return nil
 	})
 	return maybeCompressReadCloser(rc)
 }
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index 76544ff289b..d4f129ee634 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -7,6 +7,7 @@ import (
 	"compress/bzip2"
 	"fmt"
 	"io"
+	"io/fs"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -77,6 +78,10 @@ const (
 	containersOverrideXattr = "user.containers.override_stat"
 )

+var xattrsToIgnore = map[string]interface{}{
+	"security.selinux": true,
+}
+
 // Archiver allows the reuse of most utility functions of this package with a
 // pluggable Untar function. To facilitate the passing of specific id mappings
 // for untar, an archiver can be created with maps which will then be passed to
@@ -507,6 +512,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 			return err
 		}
 	}
+	if fi.Mode()&os.ModeSocket != 0 {
+		logrus.Warnf("archive: skipping %q since it is a socket", path)
+		return nil
+	}

 	hdr, err := FileInfoHeader(name, fi, link)
 	if err != nil {
@@ -743,6 +752,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L

 	var errs []string
 	for key, value := range hdr.Xattrs {
+		if _, found := xattrsToIgnore[key]; found {
+			continue
+		}
 		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
 			if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) {
 				// We ignore errors here because not all graphdrivers support
@@ -852,14 +864,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 		rebaseName := options.RebaseNames[include]

 		walkRoot := getWalkRoot(srcPath, include)
-		filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+		filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
 			if err != nil {
 				logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
 				return nil
 			}

 			relFilePath, err := filepath.Rel(srcPath, filePath)
-			if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+			if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
 				// Error getting relative path OR we are looking
 				// at the source directory path. Skip in both situations.
 				return nil
@@ -892,7 +904,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 				// dir. If so then we can't skip this dir.

 				// It's not a dir then, so we can just return/skip.
-				if !f.IsDir() {
+				if !d.IsDir() {
 					return nil
 				}

@@ -962,7 +974,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
 	whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 	buffer := make([]byte, 1<<20)

+	doChown := !options.NoLchown
 	if options.ForceMask != nil {
+		// if ForceMask is in place, make sure lchown is disabled.
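+		// Instead of chowning each entry, ownership and mode are recorded in
+		// the containersOverrideXattr ("user.containers.override_stat") as
+		// "uid:gid:0mode", so a rootless extraction can still represent, say,
+		// a root-owned 0755 directory (sketch of the call made below):
+		//
+		//	system.Lsetxattr(dest, containersOverrideXattr, []byte("0:0:0755"), 0)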
+		doChown = false
 		uid, gid, mode, err := GetFileOwner(dest)
 		if err == nil {
 			value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
@@ -1067,7 +1082,7 @@ loop:
 			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
 		}

-		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+		if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
 			return err
 		}

diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
index 2f548b661ce..51fbd9a2197 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go
@@ -36,7 +36,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
 		// we just rename the file and make it normal
 		dir, filename := filepath.Split(hdr.Name)
 		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
-		hdr.Mode = 0600
+		hdr.Mode = 0
 		hdr.Typeflag = tar.TypeReg
 		hdr.Size = 0
 	}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
index bbbd8c9de87..8769f2291b6 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go
@@ -1,9 +1,11 @@
+//go:build !linux
 // +build !linux

 package archive

 import (
 	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -41,7 +43,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
 func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInfo, error) {
 	root := newRootFileInfo(idMappings)

-	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+	err := filepath.WalkDir(sourceDir, func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index 14ffad5c0d4..ca8832fe421 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -4,6 +4,7 @@ import (
 	"archive/tar"
 	"fmt"
 	"io"
+	"io/fs"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -134,7 +135,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 			if err != nil {
 				return 0, err
 			}
-			err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+			err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
 				if err != nil {
 					if os.IsNotExist(err) {
 						err = nil // parent was deleted
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index e874eb74e05..482e036630f 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -5,9 +5,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"net"
 	"os"
-	"os/user"
 	"path/filepath"
 	"sync"

@@ -17,13 +15,6 @@ import (
 	"github.com/pkg/errors"
 )

-func init() {
-	// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
-	// environment not in the chroot from untrusted files.
-	_, _ = user.Lookup("storage")
-	_, _ = net.LookupHost("localhost")
-}
-
 // NewArchiver returns a new Archiver which uses chrootarchive.Untar
 func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
 	archiver := archive.NewArchiver(idMappings)
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 76c94c6c1e9..58729ec8cc7 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -3,7 +3,9 @@ package chrootarchive
 import (
 	"fmt"
 	"io/ioutil"
+	"net"
 	"os"
+	"os/user"
 	"path/filepath"

 	"github.com/containers/storage/pkg/mount"
@@ -23,6 +25,11 @@ func chroot(path string) (err error) {
 		return err
 	}

+	// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+	// environment not in the chroot from untrusted files.
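+	// The results of these lookups are deliberately discarded: the useful side
+	// effect is that glibc dlopen()s its NSS plugins (libnss_files, libnss_dns,
+	// ...) from the host filesystem now, before chroot() below could make the
+	// loader resolve them inside an untrusted root. Moving this out of an
+	// init() in the package delays the cost until a chroot actually happens.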
+	_, _ = user.Lookup("storage")
+	_, _ = net.LookupHost("localhost")
+
 	// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot
 	if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) {
 		return realChroot(path)
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
new file mode 100644
index 00000000000..b8b278a1329
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -0,0 +1,627 @@
+package chunked
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unsafe"
+
+	storage "github.com/containers/storage"
+	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/ioutils"
+	jsoniter "github.com/json-iterator/go"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	cacheKey     = "chunked-manifest-cache"
+	cacheVersion = 1
+)
+
+type metadata struct {
+	tagLen    int
+	digestLen int
+	tags      []byte
+	vdata     []byte
+}
+
+type layer struct {
+	id       string
+	metadata *metadata
+	target   string
+}
+
+type layersCache struct {
+	layers  []layer
+	refs    int
+	store   storage.Store
+	mutex   sync.RWMutex
+	created time.Time
+}
+
+var cacheMutex sync.Mutex
+var cache *layersCache
+
+func (c *layersCache) release() {
+	cacheMutex.Lock()
+	defer cacheMutex.Unlock()
+
+	c.refs--
+	if c.refs == 0 {
+		cache = nil
+	}
+}
+
+func getLayersCacheRef(store storage.Store) *layersCache {
+	cacheMutex.Lock()
+	defer cacheMutex.Unlock()
+	if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+		cache.refs++
+		return cache
+	}
+	cache = &layersCache{
+		store:   store,
+		refs:    1,
+		created: time.Now(),
+	}
+	return cache
+}
+
+func getLayersCache(store storage.Store) (*layersCache, error) {
+	c := getLayersCacheRef(store)
+
+	if err := c.load(); err != nil {
+		c.release()
+		return nil, err
+	}
+	return c, nil
+}
+
+func (c *layersCache) load() error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	allLayers, err := c.store.Layers()
+	if err != nil {
+		return err
+	}
+	existingLayers := make(map[string]string)
+	for _, r := range c.layers {
+		existingLayers[r.id] = r.target
+	}
+
+	currentLayers := make(map[string]string)
+	for _, r := range allLayers {
+		currentLayers[r.ID] = r.ID
+		if _, found := existingLayers[r.ID]; found {
+			continue
+		}
+
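+		// For a layer not seen before there is a three-step fallback below:
+		// (1) reuse a cache blob previously stored as layer big data under
+		// cacheKey, (2) otherwise rebuild it from the layer's TOC manifest
+		// stored under bigDataKey, (3) otherwise skip the layer entirely.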
+		bigData, err := c.store.LayerBigData(r.ID, cacheKey)
+		// if the cache already exists, read and use it
+		if err == nil {
+			defer bigData.Close()
+			metadata, err := readMetadataFromCache(bigData)
+			if err == nil {
+				c.addLayer(r.ID, metadata)
+				continue
+			}
+			logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
+		} else if errors.Cause(err) != os.ErrNotExist {
+			return err
+		}
+
+		// otherwise create it from the layer TOC.
+		manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
+		if err != nil {
+			continue
+		}
+		defer manifestReader.Close()
+
+		manifest, err := ioutil.ReadAll(manifestReader)
+		if err != nil {
+			return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
+		}
+
+		metadata, err := writeCache(manifest, r.ID, c.store)
+		if err == nil {
+			c.addLayer(r.ID, metadata)
+		}
+	}
+
+	var newLayers []layer
+	for _, l := range c.layers {
+		if _, found := currentLayers[l.id]; found {
+			newLayers = append(newLayers, l)
+		}
+	}
+	c.layers = newLayers
+
+	return nil
+}
+
+// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
+// is usable for deduplication with hardlinks.
+// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
+	digester := digest.Canonical.Digester()
+
+	modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+	hash := digester.Hash()
+
+	if _, err := hash.Write([]byte(f.Digest)); err != nil {
+		return "", err
+	}
+
+	if _, err := hash.Write([]byte(modeString)); err != nil {
+		return "", err
+	}
+
+	if len(f.Xattrs) > 0 {
+		keys := make([]string, 0, len(f.Xattrs))
+		for k := range f.Xattrs {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for _, k := range keys {
+			if _, err := hash.Write([]byte(k)); err != nil {
+				return "", err
+			}
+			if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+				return "", err
+			}
+		}
+	}
+	return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $OFFSET@$PATH
+func generateFileLocation(path string, offset uint64) []byte {
+	return []byte(fmt.Sprintf("%d@%s", offset, path))
+}
+
+// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
+// the [OFFSET; LEN] points to the variable length data where the file locations
+// are stored.  $DIGEST has length digestLen stored in the metadata file header.
+func generateTag(digest string, offset, len uint64) string {
+	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
+}
+
+type setBigData interface {
+	// SetLayerBigData stores a (possibly large) chunk of named data
+	SetLayerBigData(id, key string, data io.Reader) error
+}
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests, each tagged with the offset and length
+// of its file location entry in the variable-length data.
+// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
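+// The blob layout written below is five little-endian uint64 header fields
+// (version, tag length, digest length, tags size, vdata size) followed by the
+// sorted fixed-width tag block and then the variable-length data. Each tag is
+// generateTag(digest, offset, length); for instance a (shortened) digest with
+// offset 42 and length 17 serializes as (illustrative):
+//
+//	sha256:abcd…00000000000000000042@00000000000000000017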
+// There are 3 kinds of digests stored:
+// - digest(file.payload)
+// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
+// - digest(i) for each i in chunks(file payload)
+func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
+	var vdata bytes.Buffer
+	tagLen := 0
+	digestLen := 0
+	var tagsBuffer bytes.Buffer
+
+	toc, err := prepareMetadata(manifest)
+	if err != nil {
+		return nil, err
+	}
+
+	var tags []string
+	for _, k := range toc {
+		if k.Digest != "" {
+			location := generateFileLocation(k.Name, 0)
+
+			off := uint64(vdata.Len())
+			l := uint64(len(location))
+
+			d := generateTag(k.Digest, off, l)
+			if tagLen == 0 {
+				tagLen = len(d)
+			}
+			if tagLen != len(d) {
+				return nil, errors.New("digest with different length found")
+			}
+			tags = append(tags, d)
+
+			fp, err := calculateHardLinkFingerprint(k)
+			if err != nil {
+				return nil, err
+			}
+			d = generateTag(fp, off, l)
+			if tagLen != len(d) {
+				return nil, errors.New("digest with different length found")
+			}
+			tags = append(tags, d)
+
+			if _, err := vdata.Write(location); err != nil {
+				return nil, err
+			}
+
+			digestLen = len(k.Digest)
+		}
+		if k.ChunkDigest != "" {
+			location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+			off := uint64(vdata.Len())
+			l := uint64(len(location))
+			d := generateTag(k.ChunkDigest, off, l)
+			if tagLen == 0 {
+				tagLen = len(d)
+			}
+			if tagLen != len(d) {
+				return nil, errors.New("digest with different length found")
+			}
+			tags = append(tags, d)
+
+			if _, err := vdata.Write(location); err != nil {
+				return nil, err
+			}
+			digestLen = len(k.ChunkDigest)
+		}
+	}
+
+	sort.Strings(tags)
+
+	for _, t := range tags {
+		if _, err := tagsBuffer.Write([]byte(t)); err != nil {
+			return nil, err
+		}
+	}
+
+	pipeReader, pipeWriter := io.Pipe()
+	errChan := make(chan error, 1)
+	go func() {
+		defer pipeWriter.Close()
+		defer close(errChan)
+
+		// version
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
+			errChan <- err
+			return
+		}
+
+		// len of a tag
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
+			errChan <- err
+			return
+		}
+
+		// len of a digest
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
+			errChan <- err
+			return
+		}
+
+		// tags length
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
+			errChan <- err
+			return
+		}
+
+		// vdata length
+		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
+			errChan <- err
+			return
+		}
+
+		// tags
+		if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
+			errChan <- err
+			return
+		}
+
+		// variable length data
+		if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
+			errChan <- err
+			return
+		}
+
+		errChan <- nil
+	}()
+	defer pipeReader.Close()
+
+	counter := ioutils.NewWriteCounter(ioutil.Discard)
+
+	r := io.TeeReader(pipeReader, counter)
+
+	if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
+		return nil, err
+	}
+
+	if err := <-errChan; err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
+
+	return &metadata{
+		digestLen: digestLen,
+		tagLen:    tagLen,
+		tags:      tagsBuffer.Bytes(),
+		vdata:     vdata.Bytes(),
+	}, nil
+}
+
+func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
+	var version, tagLen, digestLen, tagsLen, vdataLen uint64
+	if err := binary.Read(bigData, binary.LittleEndian, &version);
err != nil { + return nil, err + } + if version != cacheVersion { + return nil, nil + } + if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil { + return nil, err + } + + tags := make([]byte, tagsLen) + if _, err := bigData.Read(tags); err != nil { + return nil, err + } + + vdata := make([]byte, vdataLen) + if _, err := bigData.Read(vdata); err != nil { + return nil, err + } + + return &metadata{ + tagLen: int(tagLen), + digestLen: int(digestLen), + tags: tags, + vdata: vdata, + }, nil +} + +func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { + toc, err := unmarshalToc(manifest) + if err != nil { + // ignore errors here. They might be caused by a different manifest format. + return nil, nil + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
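An editorial aside before findFileInOtherLayers: since generateTag pads offset and length with %.20d, every record in the tags buffer has identical width, so findTag above can binary-search the raw byte slice with sort.Search and parse only the OFFSET@LEN suffix of the single matching record. A self-contained sketch of that lookup, with hypothetical names:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// lookupTag searches a sorted concatenation of fixed-width records, each laid
// out as DIGEST + "%.20d@%.20d" (an offset@length pair into the vdata blob).
func lookupTag(tags []byte, recLen, digestLen int, digest string) (string, bool) {
	if len(digest) != digestLen {
		return "", false
	}
	n := len(tags) / recLen
	i := sort.Search(n, func(i int) bool {
		return string(tags[i*recLen:i*recLen+digestLen]) >= digest
	})
	if i < n && string(tags[i*recLen:i*recLen+digestLen]) == digest {
		// the rest of the record locates the file-location string in vdata
		return string(tags[i*recLen+digestLen : (i+1)*recLen]), true
	}
	return "", false
}

func main() {
	rec := func(d string, off, l int) string { return fmt.Sprintf("%s%.20d@%.20d", d, off, l) }
	tags := []byte(strings.Join([]string{rec("aaaa", 0, 10), rec("bbbb", 10, 7)}, ""))
	loc, ok := lookupTag(tags, len(rec("aaaa", 0, 0)), 4, "bbbb")
	fmt.Println(loc, ok) // 00000000000000000010@00000000000000000007 true
}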
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { + digest := file.Digest + if useHardLinks { + var err error + digest, err = calculateHardLinkFingerprint(file) + if err != nil { + return "", "", err + } + } + target, name, off, err := c.findDigestInternal(digest) + if off == 0 { + return target, name, err + } + return "", "", nil +} + +func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { + return c.findDigestInternal(chunk.ChunkDigest) +} + +func unmarshalToc(manifest []byte) (*internal.TOC, error) { + var buf bytes.Buffer + count := 0 + var toc internal.TOC + + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + count += len(iter.ReadStringAsSlice()) + case "xattrs": + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + count += len(iter.ReadStringAsSlice()) + } + default: + iter.Skip() + } + } + } + break + } + + buf.Grow(count) + + getString := func(b []byte) string { + from := buf.Len() + buf.Write(b) + to := buf.Len() + return byteSliceAsString(buf.Bytes()[from:to]) + } + + iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field == "version" { + toc.Version = iter.ReadInt() + continue + } + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type": + m.Type = getString(iter.ReadStringAsSlice()) + case "name": + m.Name = getString(iter.ReadStringAsSlice()) + case "linkName": + m.Linkname = getString(iter.ReadStringAsSlice()) + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "UID": + m.UID = iter.ReadInt() + case "GID": + m.GID = iter.ReadInt() + case "ModTime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devMajor": + m.Devmajor = iter.ReadInt64() + case "devMinor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = getString(iter.ReadStringAsSlice()) + case "offset": + m.Offset = iter.ReadInt64() + case "endOffset": + m.EndOffset = iter.ReadInt64() + case "chunkSize": + m.ChunkSize = iter.ReadInt64() + case "chunkOffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkDigest": + m.ChunkDigest = getString(iter.ReadStringAsSlice()) + case "chunkType": + m.ChunkType = getString(iter.ReadStringAsSlice()) + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + value := iter.ReadStringAsSlice() + m.Xattrs[key] = getString(value) + } + default: + iter.Skip() + } + } + toc.Entries = append(toc.Entries, m) + } + break + } + 
toc.StringsBuf = buf
+	return &toc, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 092cf584af3..aeb7cfd4f01 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -5,6 +5,7 @@ package compressor
 // larger software like the graph drivers.
 
 import (
+	"bufio"
 	"encoding/base64"
 	"io"
 	"io/ioutil"
@@ -15,6 +16,189 @@ import (
 	"github.com/vbatts/tar-split/archive/tar"
 )
 
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole. Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF. Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros). If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := int64(len(b)) + if rc.pendingHole < toCopy { + toCopy = rc.pendingHole + } + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := 0; i < len(b); i++ { + holeLen, n, err := rc.reader.ReadByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) @@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if _, err := zstdWriter.Write(rawBytes); err != nil { return err } - payloadDigester := digest.Canonical.Digester() - payloadChecksum := payloadDigester.Hash() - payloadDest := io.MultiWriter(payloadChecksum, zstdWriter) + payloadDigester := digest.Canonical.Digester() + chunkDigester := digest.Canonical.Digester() // Now handle the payload, if any - var startOffset, endOffset int64 + startOffset := int64(0) + lastOffset := int64(0) + lastChunkOffset := int64(0) + checksum := "" + + chunks := []chunk{} + + hf := &holesFinder{ + threshold: holesThreshold, + reader: bufio.NewReader(tr), + } + + rcReader := &rollingChecksumReader{ + reader: hf, + rollsum: NewRollSum(), + } + + payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) for { - read, errRead := tr.Read(buf) + mustSplit, read, errRead := rcReader.Read(buf) if errRead != nil && errRead != io.EOF { return err } - - // restart the compression only if there is - // a payload. + // restart the compression only if there is a payload. 
if read > 0 { if startOffset == 0 { startOffset, err = restartCompression() if err != nil { return err } + lastOffset = startOffset + } + + if _, err := payloadDest.Write(buf[:read]); err != nil { + return err } - _, err := payloadDest.Write(buf[:read]) + } + if (mustSplit || errRead == io.EOF) && startOffset > 0 { + off, err := restartCompression() if err != nil { return err } + + chunkSize := rcReader.WrittenOut - lastChunkOffset + if chunkSize > 0 { + chunkType := internal.ChunkTypeData + if rcReader.IsLastChunkZeros { + chunkType = internal.ChunkTypeZeros + } + + chunks = append(chunks, chunk{ + ChunkOffset: lastChunkOffset, + Offset: lastOffset, + Checksum: chunkDigester.Digest().String(), + ChunkSize: chunkSize, + ChunkType: chunkType, + }) + } + + lastOffset = off + lastChunkOffset = rcReader.WrittenOut + chunkDigester = digest.Canonical.Digester() + payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) } if errRead == io.EOF { if startOffset > 0 { - endOffset, err = restartCompression() - if err != nil { - return err - } checksum = payloadDigester.Digest().String() } break @@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r for k, v := range hdr.Xattrs { xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) } - m := internal.FileMetadata{ - Type: typ, - Name: hdr.Name, - Linkname: hdr.Linkname, - Mode: hdr.Mode, - Size: hdr.Size, - UID: hdr.Uid, - GID: hdr.Gid, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - Devmajor: hdr.Devmajor, - Devminor: hdr.Devminor, - Xattrs: xattrs, - Digest: checksum, - Offset: startOffset, - EndOffset: endOffset, - - // ChunkSize is 0 for the last chunk - ChunkSize: 0, - ChunkOffset: 0, - ChunkDigest: checksum, - } - metadata = append(metadata, m) + entries := []internal.FileMetadata{ + { + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: &hdr.ModTime, + AccessTime: &hdr.AccessTime, + ChangeTime: &hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Digest: checksum, + Offset: startOffset, + EndOffset: lastOffset, + }, + } + for i := 1; i < len(chunks); i++ { + entries = append(entries, internal.FileMetadata{ + Type: internal.TypeChunk, + Name: hdr.Name, + ChunkOffset: chunks[i].ChunkOffset, + }) + } + if len(chunks) > 1 { + for i := range chunks { + entries[i].ChunkSize = chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) } rawBytes := tr.RawBytes() @@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { if level == nil { - l := 3 + l := 10 level = &l } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 00000000000..f4dfad822e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const windowSize = 64 // Roll assumes windowSize is a power of 2 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. 
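Before OnSplitWithBits itself, a sketch of how this checksum drives the chunk splitting in writeZstdChunkedStream: every payload byte is fed to Roll, and a boundary is cut whenever the low RollsumBits bits of the sum line up, which gives chunks of roughly 1<<RollsumBits bytes whose boundaries stay stable when bytes are inserted or removed upstream. The sketch assumes it sits next to the RollSum type above; splitChunks is a hypothetical name, not part of this patch.

// splitChunks cuts data into content-defined chunks: a boundary is placed
// after any byte where the rolling checksum's low `bits` bits match the
// split pattern, so the expected chunk size is about 1<<bits bytes.
func splitChunks(data []byte, bits uint32) [][]byte {
	rs := NewRollSum()
	var chunks [][]byte
	start := 0
	for i, b := range data {
		rs.Roll(b)
		if rs.OnSplitWithBits(bits) {
			chunks = append(chunks, data[start:i+1])
			start = i + 1
		}
	}
	if start < len(data) {
		chunks = append(chunks, data[start:]) // trailing partial chunk
	}
	return chunks
}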
+func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index c91c43d85cd..3bb5286d92d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,11 +8,11 @@ import ( "archive/tar" "bytes" "encoding/binary" - "encoding/json" "fmt" "io" "time" + jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" ) @@ -20,6 +20,9 @@ import ( type TOC struct { Version int `json:"version"` Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` } type FileMetadata struct { @@ -27,25 +30,33 @@ type FileMetadata struct { Name string `json:"name"` Linkname string `json:"linkName,omitempty"` Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size"` - UID int `json:"uid"` - GID int `json:"gid"` - ModTime time.Time `json:"modtime"` - AccessTime time.Time `json:"accesstime"` - ChangeTime time.Time `json:"changetime"` - Devmajor int64 `json:"devMajor"` - Devminor int64 `json:"devMinor"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` Xattrs map[string]string `json:"xattrs,omitempty"` Digest string `json:"digest,omitempty"` Offset int64 `json:"offset,omitempty"` EndOffset int64 `json:"endOffset,omitempty"` - // Currently chunking is not supported. ChunkSize int64 `json:"chunkSize,omitempty"` ChunkOffset int64 `json:"chunkOffset,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. 
+ Chunks []*FileMetadata `json:"-"` } +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + const ( TypeReg = "reg" TypeChunk = "chunk" @@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off Entries: metadata, } + var json = jsoniter.ConfigCompatibleWithStandardLibrary // Generate the manifest manifest, err := json.Marshal(toc) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 6efc6a4c845..9434499d2d9 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -4,8 +4,8 @@ import ( archivetar "archive/tar" "context" "encoding/base64" - "encoding/json" "fmt" + "hash" "io" "io/ioutil" "os" @@ -13,6 +13,8 @@ import ( "reflect" "sort" "strings" + "sync" + "sync/atomic" "syscall" "time" @@ -25,6 +27,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -41,24 +44,35 @@ const ( bigDataKey = "zstd-chunked-manifest" fileTypeZstdChunked = iota - fileTypeEstargz = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 ) type compressedFileType int type chunkedDiffer struct { - stream ImageSourceSeekable - manifest []byte - layersMetadata map[string][]internal.FileMetadata - layersTarget map[string]string - tocOffset int64 - fileType compressedFileType + stream ImageSourceSeekable + manifest []byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, } -func timeToTimespec(time time.Time) (ts unix.Timespec) { - if time.IsZero() { +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) @@ -67,11 +81,29 @@ func timeToTimespec(time time.Time) (ts unix.Timespec) { return unix.NsecToTimespec(time.UnixNano()) } +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. 
+ srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) + return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + unix.Unlinkat(destDirFd, destBase, 0) + return doLink() + } + return err +} + func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { src := fmt.Sprintf("/proc/self/fd/%d", srcFd) st, err := os.Stat(src) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) } copyWithFileRange, copyWithFileClone := true, true @@ -83,20 +115,7 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us if err == nil { defer destDir.Close() - doLink := func() error { - // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses - // /proc/self/fd doesn't and can be used with rootless. - srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) - return unix.Linkat(unix.AT_FDCWD, srcPath, int(destDir.Fd()), destBase, unix.AT_SYMLINK_FOLLOW) - } - - err := doLink() - - // if the destination exists, unlink it first and try again - if err != nil && os.IsExist(err) { - unix.Unlinkat(int(destDir.Fd()), destBase, 0) - err = doLink() - } + err := doHardLink(srcFd, int(destDir.Fd()), destBase) if err == nil { return nil, st.Size(), nil } @@ -106,63 +125,15 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us // If the destination file already exists, we shouldn't blow it away dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) } err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) if err != nil { dstFile.Close() - return nil, -1, err - } - return dstFile, st.Size(), err -} - -func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata { - maps := make(map[string]map[string][]*internal.FileMetadata) - - for layerID, v := range layersMetadata { - r := make(map[string][]*internal.FileMetadata) - for i := range v { - if v[i].Digest != "" { - r[v[i].Digest] = append(r[v[i].Digest], &v[i]) - } - } - maps[layerID] = r - } - return maps -} - -func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) { - allLayers, err := store.Layers() - if err != nil { - return nil, nil, err - } - - layersMetadata := make(map[string][]internal.FileMetadata) - layersTarget := make(map[string]string) - for _, r := range allLayers { - manifestReader, err := store.LayerBigData(r.ID, bigDataKey) - if err != nil { - continue - } - defer manifestReader.Close() - manifest, err := ioutil.ReadAll(manifestReader) - if err != nil { - return nil, nil, err - } - var toc internal.TOC - if err := json.Unmarshal(manifest, &toc); err != nil { - continue - } - layersMetadata[r.ID] = toc.Entries - target, err := store.DifferTarget(r.ID) - if err != nil { - return nil, nil, err - } - layersTarget[r.ID] = target + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) } - - return layersMetadata, layersTarget, nil + return dstFile, st.Size(), nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
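Between these two hunks, a sketch of the hard-link fast path that doHardLink factors out above: linking through the /proc/self/fd alias of an already-open descriptor works for rootless users, whereas linkat(2) with AT_EMPTY_PATH would require CAP_DAC_READ_SEARCH. linkOrCopy is a hypothetical standalone helper with the error handling trimmed to the essentials:

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// linkOrCopy first tries to hardlink the open source fd at destBase inside
// destDirFd via its /proc/self/fd alias (AT_SYMLINK_FOLLOW makes linkat
// resolve the procfs symlink to the real inode), and falls back to a plain
// byte copy when linking is impossible, e.g. across filesystems (EXDEV).
func linkOrCopy(srcFd, destDirFd int, destBase string, mode uint32) error {
	srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
	if err := unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW); err == nil {
		return nil
	}
	dstFd, err := unix.Openat(destDirFd, destBase, unix.O_CREAT|unix.O_WRONLY|unix.O_EXCL, mode)
	if err != nil {
		return err
	}
	dst := os.NewFile(uintptr(dstFd), destBase)
	defer dst.Close()
	src, err := os.Open(srcPath) // reopen the source through procfs for reading
	if err != nil {
		return err
	}
	defer src.Close()
	_, err = io.Copy(dst, src)
	return err
}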
@@ -179,67 +150,71 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
 func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
 	manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
-	layersMetadata, layersTarget, err := getLayersCache(store)
+	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		stream:         iss,
-		manifest:       manifest,
-		layersMetadata: layersMetadata,
-		layersTarget:   layersTarget,
-		tocOffset:      tocOffset,
-		fileType:       fileTypeZstdChunked,
+		copyBuffer:  makeCopyBuffer(),
+		stream:      iss,
+		manifest:    manifest,
+		layersCache: layersCache,
+		tocOffset:   tocOffset,
+		fileType:    fileTypeZstdChunked,
 	}, nil
 }
 
 func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
 	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("read estargz manifest: %w", err)
 	}
-	layersMetadata, layersTarget, err := getLayersCache(store)
+	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}
 
 	return &chunkedDiffer{
-		stream:         iss,
-		manifest:       manifest,
-		layersMetadata: layersMetadata,
-		layersTarget:   layersTarget,
-		tocOffset:      tocOffset,
-		fileType:       fileTypeEstargz,
+		copyBuffer:  makeCopyBuffer(),
+		stream:      iss,
+		manifest:    manifest,
+		layersCache: layersCache,
+		tocOffset:   tocOffset,
+		fileType:    fileTypeEstargz,
 	}, nil
 }
 
+func makeCopyBuffer() []byte {
+	return make([]byte, 2<<20)
+}
+
 // copyFileFromOtherLayer copies a file from another layer
 // file is the file to look for.
 // source is the path to the source layer checkout.
-// otherFile contains the metadata for the file.
+// name is the path to the file to copy in source.
 // dirfd is an open file descriptor to the destination root directory.
 // useHardLinks defines whether the deduplication can be performed using hard links.
-func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
 	srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
 	if err != nil {
-		return false, nil, 0, err
+		return false, nil, 0, fmt.Errorf("open source file: %w", err)
 	}
 	defer unix.Close(srcDirfd)
 
-	srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
+	srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
 	if err != nil {
-		return false, nil, 0, err
+		return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
 	}
 	defer srcFile.Close()
 
 	dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks)
 	if err != nil {
-		return false, nil, 0, err
+		return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err)
 	}
-	return true, dstFile, written, err
+	return true, dstFile, written, nil
 }
 
 // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile.
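The canDedupMetadataWithHardLink predicate named above is untouched by this patch, so its body does not appear in the diff; roughly, two TOC entries may share an inode only if every attribute a hard link would force them to share already matches. A hedged approximation, assuming this file's internal package import; sameHardLinkMetadata is a stand-in name, not the vendored function:

// sameHardLinkMetadata reports whether deduplicating a with b via a hard
// link would be observable: owner, mode and extended attributes must all
// agree, since hardlinked paths share a single inode.
func sameHardLinkMetadata(a, b *internal.FileMetadata) bool {
	if a.UID != b.UID || a.GID != b.GID || a.Mode != b.Mode {
		return false
	}
	if len(a.Xattrs) != len(b.Xattrs) {
		return false
	}
	for k, v := range a.Xattrs {
		if b.Xattrs[k] != v {
			return false
		}
	}
	return true
}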
@@ -275,10 +250,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return false } - xattrsToIgnore := map[string]interface{}{ - "security.selinux": true, - } - xattrs := make(map[string]string) for _, x := range listXattrs { v, err := system.Lgetxattr(path, x) @@ -301,45 +272,9 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return canDedupMetadataWithHardLink(file, &otherFile) } -// findFileInOtherLayers finds the specified file in other layers. -// file is the file to look for. -// dirfd is an open file descriptor to the checkout root directory. -// layersMetadata contains the metadata for each layer in the storage. -// layersTarget maps each layer to its checkout on disk. -// useHardLinks defines whether the deduplication can be performed using hard links. -func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) { - // this is ugly, needs to be indexed - for layerID, checksums := range layersMetadata { - source, ok := layersTarget[layerID] - if !ok { - continue - } - files, found := checksums[file.Digest] - if !found { - continue - } - for _, candidate := range files { - // check if it is a valid candidate to dedup file - if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) { - continue - } - - found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks) - if found && err == nil { - return found, dstFile, written, err - } - } - } - // If hard links deduplication was used and it has failed, try again without hard links. - if useHardLinks { - return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false) - } - return false, nil, 0, nil -} - -func getFileDigest(f *os.File) (digest.Digest, error) { +func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { digester := digest.Canonical.Digester() - if _, err := io.Copy(digester.Hash(), f); err != nil { + if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { return "", err } return digester.Digest(), nil @@ -401,7 +336,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di // file is the file to look for. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. -func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { sourceFile := filepath.Clean(filepath.Join("/", file.Name)) if !strings.HasPrefix(sourceFile, "/usr/") { // limit host deduplication to files under /usr. 
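The buf argument threaded into getFileDigest in the following hunks exists because host deduplication hashes many candidate files in a row: io.CopyBuffer reuses one buffer where io.Copy would allocate a fresh one per file. The caller also re-hashes the source after copying, since a file under /usr can change while it is being read; a hedged sketch of that verify-copy-reverify flow (hostDedupCopy is a hypothetical helper):

package main

import (
	"fmt"
	"io"
	"os"

	digest "github.com/opencontainers/go-digest"
)

// hostDedupCopy copies src to dst only if its digest matches want both
// before and after the copy, guarding against the host file being modified
// underneath us; buf is reused across calls to avoid per-file allocations.
func hostDedupCopy(src *os.File, dst io.Writer, want digest.Digest, buf []byte) error {
	hashAll := func() (digest.Digest, error) {
		if _, err := src.Seek(0, io.SeekStart); err != nil {
			return "", err
		}
		d := digest.Canonical.Digester()
		if _, err := io.CopyBuffer(d.Hash(), src, buf); err != nil {
			return "", err
		}
		return d.Digest(), nil
	}
	got, err := hashAll()
	if err != nil {
		return err
	}
	if got != want {
		return fmt.Errorf("digest mismatch before copy: got %s, want %s", got, want)
	}
	if _, err := src.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if _, err := io.CopyBuffer(dst, src, buf); err != nil {
		return err
	}
	if got, err = hashAll(); err != nil || got != want {
		return fmt.Errorf("source changed while copying: %v", err)
	}
	return nil
}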
@@ -430,7 +365,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return false, nil, 0, err } - checksum, err := getFileDigest(f) + checksum, err := getFileDigest(f, buf) if err != nil { return false, nil, 0, err } @@ -452,7 +387,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool dstFile.Close() return false, nil, 0, err } - checksum, err = getFileDigest(f) + checksum, err = getFileDigest(f, buf) if err != nil { dstFile.Close() return false, nil, 0, err @@ -464,6 +399,19 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return true, dstFile, written, nil } +// findFileInOtherLayers finds the specified file in other layers. +// cache is the layers cache to use. +// file is the file to look for. +// dirfd is an open file descriptor to the checkout root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil @@ -490,22 +438,50 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption return nil } -type missingFile struct { - File *internal.FileMetadata +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 } -func (m missingFile) Length() int64 { - return m.File.EndOffset - m.File.Offset +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk } -type missingChunk struct { - RawChunk ImageSourceChunk - Files []missingFile +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil } // setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { if file == nil || file.Fd() < 0 { return errors.Errorf("invalid file") } @@ -515,211 +491,611 @@ func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetada if err != nil { return err } + + // If it is a symlink, force to use the path if t == tar.TypeSymlink { - return nil + usePath = true + } + + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + 
defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) } - if err := unix.Fchown(fd, metadata.UID, metadata.GID); err != nil { + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchmod(fd, uint32(mode)) + } + + if err := doChown(); err != nil { if !options.IgnoreChownErrors { - return err + return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) } } + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } data, err := base64.StdEncoding.DecodeString(v) if err != nil { - return err + return fmt.Errorf("decode xattr %q: %w", v, err) } - if err := unix.Fsetxattr(fd, k, data, 0); err != nil { - return err + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) } } - ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} - if err := unix.UtimesNanoAt(fd, "", ts, 0); err != nil && errors.Is(err, unix.ENOSYS) { - return err + if err := doUtimes(); !canIgnore(err) { + return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) } - if err := unix.Fchmod(fd, uint32(mode)); err != nil { - return err + if err := doChmod(); !canIgnore(err) { + return fmt.Errorf("chmod %q: %w", metadata.Name, err) } return nil } -// openFileUnderRoot safely opens a file under the specified root directory using openat2 -// name is the path to open relative to dirfd. -// dirfd is an open file descriptor to the target checkout directory. -// flags are the flags top pass to the open syscall. -// mode specifies the mode to use for newly created files. -func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := fmt.Sprintf("/proc/self/fd/%d", dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + fd := -1 + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). 
+	if hasNoFollow {
+		dirName := filepath.Dir(name)
+		if dirName != "" {
+			newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name))
+			if err != nil {
+				return -1, err
+			}
+			root = newRoot
+		}
+
+		parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+		if err != nil {
+			return -1, err
+		}
+		defer unix.Close(parentDirfd)
+
+		fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode))
+		if err != nil {
+			return -1, err
+		}
+	} else {
+		newPath, err := securejoin.SecureJoin(root, name)
+		if err != nil {
+			return -1, err
+		}
+		fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode))
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd))
+	if err != nil {
+		unix.Close(fd)
+		return -1, err
+	}
+
+	// Add an additional check to make sure the opened fd is inside the rootfs
+	if !strings.HasPrefix(target, targetRoot) {
+		unix.Close(fd)
+		return -1, fmt.Errorf("error while resolving %q: it resolves outside the root directory", name)
+	}
+
+	return fd, err
+}
+
+func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
 	how := unix.OpenHow{
 		Flags:   flags,
 		Mode:    uint64(mode & 07777),
 		Resolve: unix.RESOLVE_IN_ROOT,
 	}
+	return unix.Openat2(dirfd, name, &how)
+}
 
-	fd, err := unix.Openat2(dirfd, name, &how)
-	if err != nil {
-		return nil, err
+// skipOpenat2 is set when openat2 is not supported by the underlying kernel, so that we
+// avoid trying it again.
+var skipOpenat2 int32
+
+// openFileUnderRootRaw tries to open a file using openat2 and, if that is not supported, falls back to a
+// userspace lookup.
+func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) {
+	var fd int
+	var err error
+	if atomic.LoadInt32(&skipOpenat2) > 0 {
+		fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+	} else {
+		fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode)
+		// If the function failed with ENOSYS, switch off the support for openat2
+		// and fall back to using securejoin.
+		if err != nil && errors.Is(err, unix.ENOSYS) {
+			atomic.StoreInt32(&skipOpenat2, 1)
+			fd, err = openFileUnderRootFallback(dirfd, name, flags, mode)
+		}
 	}
-	return os.NewFile(uintptr(fd), name), nil
+	return fd, err
 }
 
-func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) {
-	file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		err2 := file.Close()
-		if err == nil {
-			err = err2
+// openFileUnderRoot safely opens a file under the specified root directory using openat2
+// name is the path to open relative to dirfd.
+// dirfd is an open file descriptor to the target checkout directory.
+// flags are the flags to pass to the open syscall.
+// mode specifies the mode to use for newly created files.
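An illustrative use of the openFileUnderRoot helper whose definition follows: every path the differ touches is resolved relative to the checkout descriptor, so hostile names in a layer cannot escape it, because openat2 with RESOLVE_IN_ROOT (or the SecureJoin fallback) clamps "..", symlinks and absolute paths to dirfd. The sketch assumes the same package; the checkout path is made up:

func exampleOpenUnderRoot() error {
	dirfd, err := unix.Open("/var/lib/containers/checkout", unix.O_PATH, 0)
	if err != nil {
		return err
	}
	defer unix.Close(dirfd)
	// Resolves to <checkout>/etc/shadow: ".." cannot climb above dirfd.
	f, err := openFileUnderRoot("../../../etc/shadow", dirfd, unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	return f.Close()
}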
+func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } } - }() + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} - digester := digest.Canonical.Digester() - checksum := digester.Hash() - to := io.MultiWriter(file, checksum) +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() - switch c.fileType { - case fileTypeZstdChunked: - z, err := zstd.NewReader(reader) - if err != nil { - return err - } - defer z.Close() + baseName := filepath.Base(name) - if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil { - return err + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { - return err + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } } - case fileTypeEstargz: + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.gzipReader == nil { - r, err := pgzip.NewReader(reader) + r, err := pgzip.NewReader(c.rawReader) if err != nil { - return err + return partCompression, err } c.gzipReader = r } else { - if err := c.gzipReader.Reset(reader); err != nil { - return err + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err } } - defer c.gzipReader.Close() + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} - if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil { +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { return err } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { return err } default: return fmt.Errorf("unknown file type %q", c.fileType) } + return nil +} + +type destinationFile struct { + dirfd int + file *os.File + digester digest.Digester + hash hash.Hash + to io.Writer + metadata *internal.FileMetadata + options *archive.TarOptions +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } - manifestChecksum, err := digest.Parse(metadata.Digest) + digester := digest.Canonical.Digester() + hash := digester.Hash() + to := io.MultiWriter(file, hash) + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + }, nil +} + +func (d *destinationFile) Close() error { + manifestChecksum, err := digest.Parse(d.metadata.Digest) if err != nil { return err } - if digester.Digest() != manifestChecksum { - return fmt.Errorf("checksum mismatch for %q", dest) + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) + } + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) +} + +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() } - return setFileAttrs(file, mode, metadata, options) + close(errors) } -func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { - for mc := 0; ; mc++ { - var part io.ReadCloser - select { - case p := <-streams: - part = p - case err := <-errs: - return err - } - if part == nil { - if mc == len(missingChunks) { - break +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e } - return errors.Errorf("invalid stream returned") } - if mc == len(missingChunks) { - part.Close() - return errors.Errorf("too many chunks returned") + }() + + for _, missingPart := range missingParts { + var part 
io.ReadCloser
+		partCompression := c.fileType
+		switch {
+		case missingPart.Hole:
+			partCompression = fileTypeHole
+		case missingPart.OriginFile != nil:
+			var err error
+			part, err = missingPart.OriginFile.OpenFile()
+			if err != nil {
+				return err
+			}
+			partCompression = fileTypeNoCompression
+		case missingPart.SourceChunk != nil:
+			select {
+			case p := <-streams:
+				part = p
+			case err := <-errs:
+				if err == nil {
+					return errors.New("not enough data returned from the server")
+				}
+				return err
+			}
+			if part == nil {
+				return errors.Errorf("invalid stream returned")
+			}
+		default:
+			return errors.Errorf("internal error: missing part misses both local and remote data stream")
+		}
 
-		for _, mf := range missingChunks[mc].Files {
+		for _, mf := range missingPart.Chunks {
 			if mf.Gap > 0 {
 				limitReader := io.LimitReader(part, mf.Gap)
-				_, err := io.Copy(ioutil.Discard, limitReader)
+				_, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
 				if err != nil {
-					part.Close()
-					return err
+					Err = err
+					goto exit
 				}
 				continue
 			}
 
-			limitReader := io.LimitReader(part, mf.Length())
+			if mf.File.Name == "" {
+				Err = errors.Errorf("file name empty")
+				goto exit
+			}
 
-			if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
-				part.Close()
-				return err
+			compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf)
+			if err != nil {
+				Err = err
+				goto exit
+			}
+
+			// Open the new file if it is different from what is already
+			// opened
+			if destFile == nil || destFile.metadata.Name != mf.File.Name {
+				var err error
+				if destFile != nil {
+				cleanup:
+					for {
+						select {
+						case err = <-closeFilesErrors:
+							if err != nil {
+								Err = err
+								goto exit
+							}
+						default:
+							break cleanup
+						}
+					}
+					filesToClose <- destFile
+				}
+				destFile, err = openDestinationFile(dirfd, mf.File, options)
+				if err != nil {
+					Err = err
+					goto exit
+				}
+			}
+
+			if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil {
+				Err = err
+				goto exit
+			}
+			if c.rawReader != nil {
+				if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil {
+					Err = err
+					goto exit
+				}
 			}
 		}
-		part.Close()
+	exit:
+		if part != nil {
+			part.Close()
+			if Err != nil {
+				break
+			}
+		}
+	}
+
+	if destFile != nil {
+		return destFile.Close()
 	}
 
 	return nil
 }
 
-func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk {
-	if len(missingChunks) <= target {
-		return missingChunks
+func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
+	getGap := func(missingParts []missingPart, i int) int {
+		prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
+		return int(missingParts[i].SourceChunk.Offset - prev)
 	}
+	getCost := func(missingParts []missingPart, i int) int {
+		cost := getGap(missingParts, i)
+		if missingParts[i-1].OriginFile != nil {
+			cost += int(missingParts[i-1].SourceChunk.Length)
+		}
+		if missingParts[i].OriginFile != nil {
+			cost += int(missingParts[i].SourceChunk.Length)
+		}
+		return cost
+	}
+
+	// simple case: merge chunks from the same file.
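Before the in-place merge loop that follows, a standalone sketch of the coalescing idea: servers cap the number of ranges per multirange request, so nearby missing ranges are fused and the small gaps between them are fetched and discarded; the getCost closure above additionally penalizes merging parts that could be satisfied locally. The span type is a hypothetical stand-in:

package main

import "fmt"

type span struct{ off, length uint64 }

// mergeRanges fuses ranges separated by at most maxGap bytes, trading a
// little wasted download (the gap bytes) for far fewer ranges per request.
func mergeRanges(in []span, maxGap uint64) []span {
	if len(in) == 0 {
		return nil
	}
	out := []span{in[0]}
	for _, s := range in[1:] {
		last := &out[len(out)-1]
		if s.off-(last.off+last.length) <= maxGap {
			last.length = s.off + s.length - last.off // absorb the gap
		} else {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	fmt.Println(mergeRanges([]span{{0, 10}, {12, 8}, {100, 5}}, 4))
	// [{0 20} {100 5}]: the 2-byte gap is absorbed, the 80-byte one is not
}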
+ newMissingParts := missingParts[0:1] + prevIndex := 0 + for i := 1; i < len(missingParts); i++ { + gap := getGap(missingParts, i) + if gap == 0 && missingParts[prevIndex].OriginFile == nil && + missingParts[i].OriginFile == nil && + !missingParts[prevIndex].Hole && !missingParts[i].Hole && + len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && + missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { + missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize + missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize + } else { + newMissingParts = append(newMissingParts, missingParts[i]) + prevIndex++ + } + } + missingParts = newMissingParts - getGap := func(missingChunks []missingChunk, i int) int { - prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length - return int(missingChunks[i].RawChunk.Offset - prev) + if len(missingParts) <= target { + return missingParts } // this implementation doesn't account for duplicates, so it could merge // more than necessary to reach the specified target. Since target itself // is a heuristic value, it doesn't matter. - var gaps []int - for i := 1; i < len(missingChunks); i++ { - gaps = append(gaps, getGap(missingChunks, i)) + costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) } - sort.Ints(gaps) + sort.Ints(costs) - toShrink := len(missingChunks) - target - targetValue := gaps[toShrink-1] + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] - newMissingChunks := missingChunks[0:1] - for i := 1; i < len(missingChunks); i++ { - gap := getGap(missingChunks, i) - if gap > targetValue { - newMissingChunks = append(newMissingChunks, missingChunks[i]) + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) } else { - prev := &newMissingChunks[len(newMissingChunks)-1] - prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil if gap > 0 { - gapFile := missingFile{ + gapFile := missingFileChunk{ Gap: int64(gap), } - prev.Files = append(prev.Files, gapFile) + prev.Chunks = append(prev.Chunks, gapFile) } - prev.Files = append(prev.Files, missingChunks[i].Files...) + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) 
} } - return newMissingChunks + return newMissingParts } -func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { +func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk - for _, c := range missingChunks { - chunksToRequest = append(chunksToRequest, c.RawChunk) + + calculateChunksToRequest := func() { + chunksToRequest = []ImageSourceChunk{} + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } + } } + calculateChunksToRequest() + // There are some missing files. Prepare a multirange request for the missing chunks. var streams chan io.ReadCloser var err error @@ -731,32 +1107,33 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun } if _, ok := err.(ErrBadRequest); ok { - requested := len(missingChunks) + requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. if requested < 64 { return err } // Merge more chunks to request - missingChunks = mergeMissingChunks(missingChunks, requested/2) + missingParts = mergeMissingChunks(missingParts, requested/2) + calculateChunksToRequest() continue } return err } - if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil { + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { return err } return nil } -func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - parent := filepath.Dir(metadata.Name) - base := filepath.Base(metadata.Name) +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) parentFd := dirfd if parent != "." { - parentFile, err := openFileUnderRoot(parent, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0) + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) if err != nil { return err } @@ -766,21 +1143,21 @@ func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opt if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { if !os.IsExist(err) { - return err + return fmt.Errorf("mkdir %q: %w", name, err) } } - file, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_RDONLY, 0) + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) if err != nil { return err } defer file.Close() - return setFileAttrs(file, mode, metadata, options) + return setFileAttrs(dirfd, file, mode, metadata, options, false) } func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0) + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) if err != nil { return err } @@ -789,7 +1166,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." 
{ - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -797,25 +1174,35 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDirFd = int(f.Fd()) } - err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH) + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) if err != nil { - return err + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) } - newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY, 0) + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) if err != nil { + // If the target is a symlink, open the file with O_PATH. + if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } return err } defer newFile.Close() - return setFileAttrs(newFile, mode, metadata, options) + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) } func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -823,7 +1210,10 @@ func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, o destDirFd = int(f.Fd()) } - return unix.Symlinkat(metadata.Linkname, destDirFd, destBase) + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil } type whiteoutHandler struct { @@ -832,13 +1222,16 @@ type whiteoutHandler struct { } func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { - file, err := openFileUnderRoot(path, d.Dirfd, unix.O_RDONLY, 0) + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) if err != nil { return err } defer file.Close() - return unix.Fsetxattr(int(file.Fd()), name, value, 0) + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil } func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { @@ -847,7 +1240,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd := d.Dirfd if dir != "" { - dir, err := openFileUnderRoot(dir, d.Dirfd, unix.O_RDONLY, 0) + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) if err != nil { return err } @@ -856,12 +1249,16 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd = int(dir.Fd()) } - return unix.Mknodat(dirfd, base, mode, dev) + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil } func checkChownErr(err error, name string, uid, gid int) error { if errors.Is(err, syscall.EINVAL) { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user 
namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err } @@ -899,7 +1296,69 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo return def } +type findAndCopyFileOptions struct { + useHardLinks bool + enableHostDedup bool + ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + if copyOptions.enableHostDedup { + found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + bigData := map[string][]byte{ bigDataKey: c.manifest, } @@ -927,14 +1386,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") // Generate the manifest - var toc internal.TOC - if err := json.Unmarshal(c.manifest, &toc); err != nil { + toc, err := unmarshalToc(c.manifest) + if err != nil { return output, err } whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) - var missingChunks []missingChunk + var missingParts []missingPart mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { @@ -956,17 +1415,61 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) if err != nil { - return output, err + return output, fmt.Errorf("cannot open %q: %w", dest, err) } defer unix.Close(dirfd) - otherLayersCache := prepareOtherLayersCache(c.layersMetadata) - // hardlinks can point to missing files. 
So create them after all files // are retrieved var hardLinks []hardLinkToCreate - missingChunksSize, totalChunksSize := int64(0), int64(0) + missingPartsSize, totalChunksSize := int64(0), int64(0) + + copyOptions := findAndCopyFileOptions{ + useHardLinks: useHardLinks, + enableHostDedup: enableHostDedup, + ostreeRepos: ostreeRepos, + options: options, + } + + type copyFileJob struct { + njob int + index int + mode os.FileMode + metadata *internal.FileMetadata + + found bool + err error + } + + var wg sync.WaitGroup + + copyResults := make([]copyFileJob, len(mergedEntries)) + + copyFileJobs := make(chan copyFileJob) + defer func() { + if copyFileJobs != nil { + close(copyFileJobs) + } + wg.Wait() + }() + + for i := 0; i < copyGoRoutines; i++ { + wg.Add(1) + jobs := copyFileJobs + + go func() { + defer wg.Done() + for job := range jobs { + found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) + job.err = err + job.found = found + copyResults[job.njob] = job + } + }() + } + + filesToWaitFor := 0 for i, r := range mergedEntries { if options.ForceMask != nil { value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) @@ -1016,7 +1519,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra return err } defer file.Close() - if err := setFileAttrs(file, mode, &r, options); err != nil { + if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { return err } return nil @@ -1028,7 +1531,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } case tar.TypeDir: - if err := safeMkdir(dirfd, mode, &r, options); err != nil { + if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } continue @@ -1062,74 +1565,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra totalChunksSize += r.Size - finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dstFile, mode, &r, options); err != nil { - return err - } + if t == tar.TypeReg { + index := i + njob := filesToWaitFor + job := copyFileJob{ + mode: mode, + metadata: &mergedEntries[index], + index: index, + njob: njob, } - return nil + copyFileJobs <- job + filesToWaitFor++ } + } - found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue - } + close(copyFileJobs) + copyFileJobs = nil - found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks) - if err != nil { - return output, err + wg.Wait() + + for _, res := range copyResults[:filesToWaitFor] { + r := &mergedEntries[res.index] + + if res.err != nil { + return output, res.err } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } + // the file was already copied to its destination + // so nothing left to do. + if res.found { continue } - if enableHostDedup { - found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. 
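The fan-out just added is a standard Go worker-pool shape: a fixed number of goroutines (copyGoRoutines) drain a jobs channel, and each job records its outcome in its own preallocated slot of copyResults, so no mutex is needed and results keep submission order. The per-chunk recovery loop the comment above refers to follows after this aside. A standalone sketch of the pattern, with illustrative names only:

```go
package main

import (
	"fmt"
	"sync"
)

// job and result are illustrative; the patch's copyFileJob carries both roles.
type job struct {
	njob int
	name string
}

type result struct {
	found bool
	err   error
}

func main() {
	names := []string{"a", "b", "c", "d"}
	results := make([]result, len(names)) // one preallocated slot per job
	jobs := make(chan job)

	var wg sync.WaitGroup
	const workers = 2
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobs {
				// Each job writes only to its own slot, so no mutex is
				// needed and results stay in submission order.
				results[j.njob] = result{found: j.name != "b"}
			}
		}()
	}

	for i, n := range names {
		jobs <- job{njob: i, name: n}
	}
	close(jobs)
	wg.Wait()

	for i, r := range results {
		fmt.Printf("job %d: found=%v err=%v\n", i, r.found, r.err)
	}
}
```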
+ for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize } - } + remainingSize = remainingSize - size - missingChunksSize += r.Size - if t == tar.TypeReg { rawChunk := ImageSourceChunk{ - Offset: uint64(r.Offset), - Length: uint64(r.EndOffset - r.Offset), + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), } - - file := missingFile{ - File: &mergedEntries[i], + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, } - - missingChunks = append(missingChunks, missingChunk{ - RawChunk: rawChunk, - Files: []missingFile{ + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ file, }, - }) + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) } } // There are some missing files. Prepare a multirange request for the missing chunks. - if len(missingChunks) > 0 { - missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks) - if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil { + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { return output, err } } @@ -1141,31 +1665,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } if totalChunksSize > 0 { - logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize)) + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } return output, nil } +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. + case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName: + return true + } + return false +} + func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) { - var mergedEntries []internal.FileMetadata - var prevEntry *internal.FileMetadata - for _, entry := range entries { - e := entry + countNextChunks := func(start int) int { + count := 0 + for _, e := range entries[start:] { + if e.Type != TypeChunk { + return count + } + count++ + } + return count + } - // ignore the metadata files for the estargz format. 
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) { + size := 0 + for _, entry := range entries { + if mustSkipFile(fileType, entry) { continue } + if entry.Type != TypeChunk { + size++ + } + } + mergedEntries := make([]internal.FileMetadata, size) + m := 0 + for i := 0; i < len(entries); i++ { + e := entries[i] + if mustSkipFile(fileType, e) { + continue + } if e.Type == TypeChunk { - if prevEntry == nil || prevEntry.Type != TypeReg { - return nil, errors.New("chunk type without a regular file") + return nil, fmt.Errorf("chunk type without a regular file") + } + + if e.Type == TypeReg { + nChunks := countNextChunks(i + 1) + + e.Chunks = make([]*internal.FileMetadata, nChunks+1) + for j := 0; j <= nChunks; j++ { + e.Chunks[j] = &entries[i+j] + e.EndOffset = entries[i+j].EndOffset } - prevEntry.EndOffset = e.EndOffset - continue + i += nChunks } - mergedEntries = append(mergedEntries, e) - prevEntry = &e + mergedEntries[m] = e + m++ } // stargz/estargz doesn't store EndOffset so let's calculate it here lastOffset := c.tocOffset @@ -1176,6 +1738,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if mergedEntries[i].Offset != 0 { lastOffset = mergedEntries[i].Offset } + + lastChunkOffset := mergedEntries[i].EndOffset + for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { + mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset + lastChunkOffset = mergedEntries[i].Chunks[j].Offset + } } return mergedEntries, nil } + +// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the +// same digest as chunk.ChunkDigest +func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return false + } + defer unix.Close(parentDirfd) + + fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0) + if err != nil { + return false + } + defer fd.Close() + + if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil { + return false + } + + r := io.LimitReader(fd, chunk.ChunkSize) + digester := digest.Canonical.Digester() + + if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil { + return false + } + + digest, err := digest.Parse(chunk.ChunkDigest) + if err != nil { + return false + } + + return digester.Digest() == digest +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index e6622cf1466..f6e0cfcfe86 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -12,109 +12,109 @@ type ThinpoolOptionsConfig struct { // grown. This is specified in terms of % of pool size. So a value of // 20 means that when threshold is hit, pool will be grown by 20% of // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent"` + AutoExtendPercent string `toml:"autoextend_percent,omitempty"` // AutoExtendThreshold determines the pool extension threshold in terms // of percentage of pool size. For example, if threshold is 60, that // means when pool is 60% full, threshold has been hit. 
- AutoExtendThreshold string `toml:"autoextend_threshold"` + AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"` // BaseSize specifies the size to use when creating the base device, // which limits the size of images and containers. - BaseSize string `toml:"basesize"` + BaseSize string `toml:"basesize,omitempty"` // BlockSize specifies a custom blocksize to use for the thin pool. - BlockSize string `toml:"blocksize"` + BlockSize string `toml:"blocksize,omitempty"` // DirectLvmDevice specifies a custom block storage device to use for // the thin pool. - DirectLvmDevice string `toml:"directlvm_device"` + DirectLvmDevice string `toml:"directlvm_device,omitempty"` // DirectLvmDeviceForcewipes device even if device already has a // filesystem - DirectLvmDeviceForce string `toml:"directlvm_device_force"` + DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"` // Fs specifies the filesystem type to use for the base device. - Fs string `toml:"fs"` + Fs string `toml:"fs,omitempty"` // log_level sets the log level of devicemapper. - LogLevel string `toml:"log_level"` + LogLevel string `toml:"log_level,omitempty"` // MetadataSize specifies the size of the metadata for the thinpool // It will be used with the `pvcreate --metadata` option. - MetadataSize string `toml:"metadatasize"` + MetadataSize string `toml:"metadatasize,omitempty"` // MinFreeSpace specifies the min free space percent in a thin pool // require for new device creation to - MinFreeSpace string `toml:"min_free_space"` + MinFreeSpace string `toml:"min_free_space,omitempty"` // MkfsArg specifies extra mkfs arguments to be used when creating the // basedevice. - MkfsArg string `toml:"mkfsarg"` + MkfsArg string `toml:"mkfsarg,omitempty"` // MountOpt specifies extra mount options used when mounting the thin // devices. - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // UseDeferredDeletion marks device for deferred deletion - UseDeferredDeletion string `toml:"use_deferred_deletion"` + UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"` // UseDeferredRemoval marks device for deferred removal - UseDeferredRemoval string `toml:"use_deferred_removal"` + UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"` // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of // retries XFS should attempt to complete IO when ENOSPC (no space) // error is returned by underlying storage device. - XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"` + XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"` } type AufsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` } type BtrfsOptionsConfig struct { // MinSpace is the minimal spaces allocated to the device - MinSpace string `toml:"min_space"` + MinSpace string `toml:"min_space,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } type OverlayOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. 
- IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // Inodes is used to set a maximum inodes of the container image. - Inodes string `toml:"inodes"` + Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories - ForceMask string `toml:"force_mask"` + ForceMask string `toml:"force_mask,omitempty"` } type VfsOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` } type ZfsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Name is the File System name of the ZFS File system - Name string `toml:"fsname"` + Name string `toml:"fsname,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } // OptionsConfig represents the "storage.options" TOML config table. @@ -122,82 +122,82 @@ type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only // Image stores. Usually used to access Networked File System // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores"` + AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` // AdditionalLayerStores is the location of additional read/only // Layer stores. Usually used to access Networked File System // for shared image content // This API is experimental and can be changed without bumping the // major version number. - AdditionalLayerStores []string `toml:"additionallayerstores"` + AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // RemapUIDs is a list of default UID mappings to use for layers. - RemapUIDs string `toml:"remap-uids"` + RemapUIDs string `toml:"remap-uids,omitempty"` // RemapGIDs is a list of default GID mappings to use for layers. - RemapGIDs string `toml:"remap-gids"` + RemapGIDs string `toml:"remap-gids,omitempty"` // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. - ForceMask os.FileMode `toml:"force_mask"` + ForceMask os.FileMode `toml:"force_mask,omitempty"` // RemapUser is the name of one or more entries in /etc/subuid which // should be used to set up default UID mappings. - RemapUser string `toml:"remap-user"` + RemapUser string `toml:"remap-user,omitempty"` // RemapGroup is the name of one or more entries in /etc/subgid which // should be used to set up default GID mappings. 
- RemapGroup string `toml:"remap-group"` + RemapGroup string `toml:"remap-group,omitempty"` // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and // /etc/subgid which should be used to set up automatically a userns. - RootAutoUsernsUser string `toml:"root-auto-userns-user"` + RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"` // AutoUsernsMinSize is the minimum size for a user namespace that is // created automatically. - AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"` + AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"` // AutoUsernsMaxSize is the maximum size for a user namespace that is // created automatically. - AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"` + AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"` // Aufs container options to be handed to aufs drivers - Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"` // Btrfs container options to be handed to btrfs drivers - Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` // Overlay container options to be handed to overlay drivers - Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` // Vfs container options to be handed to VFS drivers - Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"` // Zfs container options to be handed to ZFS drivers - Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // PullOptions specifies options to be handed to pull managers // This API is experimental and can be changed without bumping the major version number. - PullOptions map[string]string `toml:"pull_options"` + PullOptions map[string]string `toml:"pull_options,omitempty"` // DisableVolatile doesn't allow volatile mounts when it is set. 
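Worth spelling out what the blanket `,omitempty` additions in this file buy: when these option structs are re-serialized, zero-valued fields are now skipped instead of being written out as empty strings or zeroes. A sketch of the effect, assuming the BurntSushi/toml encoder that containers/storage uses (the struct below is illustrative, not the patch's own); the DisableVolatile hunk continues below:

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type overlayOptions struct {
	MountOpt     string `toml:"mountopt,omitempty"`
	MountProgram string `toml:"mount_program,omitempty"`
}

func main() {
	// Only mountopt is set; mount_program is omitted from the output
	// instead of appearing as `mount_program = ""`.
	opts := overlayOptions{MountOpt: "nodev"}
	if err := toml.NewEncoder(os.Stdout).Encode(opts); err != nil {
		panic(err)
	}
}
```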
- DisableVolatile bool `toml:"disable-volatile"` + DisableVolatile bool `toml:"disable-volatile,omitempty"` } // GetGraphDriverOptions returns the driver specific options diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 8d58d24cac8..36e1bdd5fc8 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,8 +1,10 @@ +//go:build linux || darwin || freebsd || solaris // +build linux darwin freebsd solaris package directory import ( + "io/fs" "os" "path/filepath" "syscall" @@ -21,7 +23,7 @@ func Size(dir string) (size int64, err error) { func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(d string, entry fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Usage() returns the error. // if dir/x disappeared while walking, Usage() ignores dir/x. @@ -31,8 +33,9 @@ func Usage(dir string) (usage *DiskUsage, err error) { return err } - if fileInfo == nil { - return nil + fileInfo, err := entry.Info() + if err != nil { + return err } // Check inode to only count the sizes of files with multiple hard links once. @@ -44,9 +47,8 @@ func Usage(dir string) (usage *DiskUsage, err error) { // inode is not a uint64 on all platforms. Cast it to avoid issues. data[uint64(inode)] = struct{}{} - // Ignore directory sizes - if fileInfo.IsDir() { + if entry.IsDir() { return nil } diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go index a7a81240bc2..482bc51a26e 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go @@ -1,8 +1,10 @@ +//go:build windows // +build windows package directory import ( + "io/fs" "os" "path/filepath" ) @@ -19,11 +21,11 @@ func Size(dir string) (size int64, err error) { // Usage walks a directory tree and returns its total size in bytes and the number of inodes. func Usage(dir string) (usage *DiskUsage, err error) { usage = &DiskUsage{} - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { // if dir does not exist, Size() returns the error. // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { + if os.IsNotExist(err) && path != dir { return nil } return err @@ -32,16 +34,15 @@ func Usage(dir string) (usage *DiskUsage, err error) { usage.InodeCount++ // Ignore directory sizes - if fileInfo == nil { + if d.IsDir() { return nil } - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil + fileInfo, err := d.Info() + if err != nil { + return err } - - usage.Size += s + usage.Size += fileInfo.Size() return nil }) diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000000..85c5e76c844 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. 
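Also in this group, directory.Usage switches from filepath.Walk to filepath.WalkDir: fs.DirEntry defers the stat until Info() is called, so directories can be skipped without one. A minimal standalone sketch of that pattern (not the patch's code); the homedir.go hunk resumes below:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	var total int64
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() { // cheap check, no stat needed
			return nil
		}
		info, err := d.Info() // stat only the entries we actually care about
		if err != nil {
			return err
		}
		total += info.Size()
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("total bytes:", total)
}
```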
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 06b53854b93..027db259c19 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -18,18 +18,3 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} - -// GetCacheHome is unsupported on non-linux system. -func GetCacheHome() (string, error) { - return "", errors.New("homedir.GetCacheHome() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 2475e351bb5..33177bdf306 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -93,48 +93,3 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. 
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index 4f2615ed32f..af65f2c03de 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -17,7 +17,12 @@ func Key() string { // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { - return os.Getenv(Key()) + home := os.Getenv(Key()) + if home != "" { + return home + } + home, _ = os.UserHomeDir() + return home } // GetShortcutString returns the string that is shortcut to user's home directory diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 83bc8c34ff4..7a8fec0ce5f 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -3,15 +3,18 @@ package idtools import ( "bufio" "fmt" + "io/ioutil" "os" "os/user" "sort" "strconv" "strings" + "sync" "syscall" "github.com/containers/storage/pkg/system" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -82,7 +85,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(uidMap) == 1 && uidMap[0].Size == 1 { uid = uidMap[0].HostID } else { - uid, err = toHost(0, uidMap) + uid, err = RawToHost(0, uidMap) if err != nil { return -1, -1, err } @@ -90,7 +93,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(gidMap) == 1 && gidMap[0].Size == 1 { gid = gidMap[0].HostID } else { - gid, err = toHost(0, gidMap) + gid, err = RawToHost(0, gidMap) if err != nil { return -1, -1, err } @@ -98,10 +101,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { return uid, gid, nil } -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { +// RawToContainer takes an id mapping, and uses it to translate a host ID to +// the remapped ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. 
+// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. +func RawToContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -114,10 +121,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { +// RawToHost takes an id mapping and a remapped ID, and translates the ID to +// the mapped host ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. +func RawToHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -182,31 +193,87 @@ func (i *IDMappings) RootPair() IDPair { } // ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + var target IDPair + + target.UID, err = RawToHost(pair.UID, i.uids) + if err != nil { + return target, err + } + + target.GID, err = RawToHost(pair.GID, i.gids) + return target, err +} + +var ( + overflowUIDOnce sync.Once + overflowGIDOnce sync.Once + overflowUID int + overflowGID int +) + +// getOverflowUID returns the UID mapped to the overflow user +func getOverflowUID() int { + overflowUIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present + overflowUID = 65534 + if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowUID = tmp + } + } + }) + return overflowUID +} + +// getOverflowGID returns the GID mapped to the overflow user +func getOverflowGID() int { + overflowGIDOnce.Do(func() { + // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present + overflowGID = 65534 + if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { + if tmp, err := strconv.Atoi(string(content)); err == nil { + overflowGID = tmp + } + } + }) + return overflowGID +} + +// ToHostOverflow returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids. +// If the mapping is not possible because the target ID is not mapped into +// the namespace, then the overflow ID is used.
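Before the function below, a concrete picture of the translation RawToHost performs may help: an ID inside a mapping range is shifted by the range's offset, and an unmapped ID fails, which is precisely the case where ToHostOverflow substitutes the overflow ID. A standalone sketch with a simplified local idMap type:

```go
package main

import "fmt"

// idMap is a simplified stand-in for idtools.IDMap.
type idMap struct{ ContainerID, HostID, Size int }

// rawToHost mirrors what RawToHost does for a single ID: translate by offset
// inside a matching range, fail otherwise.
func rawToHost(contID int, m []idMap) (int, error) {
	for _, r := range m {
		if contID >= r.ContainerID && contID < r.ContainerID+r.Size {
			return r.HostID + (contID - r.ContainerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	m := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(rawToHost(0, m))     // 100000 <nil>: container root
	fmt.Println(rawToHost(1000, m))  // 101000 <nil>
	fmt.Println(rawToHost(70000, m)) // error: ToHostOverflow would substitute the overflow ID here
}
```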
+func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { var err error target := i.RootPair() if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) + target.UID, err = RawToHost(pair.UID, i.uids) if err != nil { - return target, err + target.UID = getOverflowUID() + logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) } } if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) + target.GID, err = RawToHost(pair.GID, i.gids) + if err != nil { + target.GID = getOverflowGID() + logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) + } } - return target, err + return target, nil } // ToContainer returns the container UID and GID for the host uid and gid func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) + uid, err := RawToContainer(pair.UID, i.uids) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := RawToContainer(pair.GID, i.gids) return uid, gid, err } @@ -293,7 +360,7 @@ func parseSubidFile(path, username string) (ranges, error) { func checkChownErr(err error, name string, uid, gid int) error { if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name) } return err } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index db50a62e4c0..6e6e3b22bc9 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -12,11 +12,21 @@ import ( #cgo LDFLAGS: -l subid #include <shadow/subid.h> #include <stdlib.h> +#include <stdio.h> const char *Prog = "storage"; +FILE *shadow_logfd = NULL; + struct subid_range get_range(struct subid_range *ranges, int i) { - return ranges[i]; + shadow_logfd = stderr; + return ranges[i]; } + +#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_get_uid_ranges get_subuid_ranges +# define subid_get_gid_ranges get_subgid_ranges +#endif + */ import "C" @@ -32,9 +42,9 @@ func readSubid(username string, isUser bool) (ranges, error) { var nRanges C.int var cRanges *C.struct_subid_range if isUser { - nRanges = C.get_subuid_ranges(cUsername, &cRanges) + nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) } else { - nRanges = C.get_subgid_ranges(cUsername, &cRanges) + nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) } if nRanges < 0 { return nil, errors.New("cannot read subids") diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 9776b2a1287..7f270c61f82 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -46,6 +46,9 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path + if 
!filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000000..3ba99cf9351 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MNT_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MNT_SYNCHRONOUS + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MNT_UPDATE + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MNT_NOATIME + + mntDetach = unix.MNT_FORCE + + NODIRATIME = 0 + NODEV = 0 + DIRSYNC = 0 + MANDLOCK = 0 + BIND = 0 + RBIND = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SLAVE = 0 + RSLAVE = 0 + SHARED = 0 + RSHARED = 0 + RELATIME = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go index 9afd26d4c06..ee0f593a50a 100644 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -1,4 +1,5 @@ -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package mount diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go index b31cf99d0ff..2404e331dee 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -1,3 +1,6 @@ +//go:build freebsd && cgo +// +build freebsd,cgo + package mount /* @@ -28,14 +31,25 @@ func allocateIOVecs(options []string) []C.struct_iovec { func mount(device, target, mType string, flag uintptr, data string) error { isNullFS := false - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true + options := []string{"fspath", target} + + if data != "" { + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + continue + } + opt := strings.SplitN(x, "=", 2) + options = append(options, opt[0]) + if len(opt) == 2 { + options = append(options, opt[1]) + } else { + options = append(options, "") + } } } - options := []string{"fspath", target} if isNullFS { options = append(options, "fstype", "nullfs", "target", device) } else { diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go index 9d20cfbf869..74fe666090f 100644 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -1,4 +1,6 @@ -// 
+build !linux,!freebsd +//go:build !linux && !(freebsd && cgo) +// +build !linux +// +build !freebsd !cgo package mount diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go new file mode 100644 index 00000000000..6f63ae99170 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go @@ -0,0 +1,37 @@ +// +build freebsd + +package reexec + +import ( + "context" + "os" + "os/exec" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Uses sysctl. +func Self() string { + path, err := unix.SysctlArgs("kern.proc.pathname", -1) + if err == nil { + return path + } + return os.Args[0] +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + cmd := exec.Command(Self()) + cmd.Args = args + return cmd +} + +// CommandContext returns *exec.Cmd which has Path as current binary. +func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, Self()) + cmd.Args = args + return cmd +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 372bee7321f..d3dd86d349f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -17,6 +17,7 @@ func Self() string { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 1ecaa906fed..a56ada2161e 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -1,4 +1,5 @@ -// +build freebsd solaris darwin +//go:build solaris || darwin +// +build solaris darwin package reexec @@ -17,6 +18,7 @@ func Self() string { // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -24,6 +26,7 @@ func Command(args ...string) *exec.Cmd { // CommandContext returns *exec.Cmd which has Path as current binary. 
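A brief aside on the calling convention that the panicIfNotInitialized() calls added across these Command variants enforce: main() must invoke reexec.Init() before anything else, so that a re-executed child dispatches to its registered initializer instead of falling through into main(). A usage sketch (the "storage-helper" name is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// When the process is re-executed with os.Args[0] == "storage-helper",
	// this initializer runs instead of main().
	reexec.Register("storage-helper", func() {
		fmt.Fprintln(os.Stderr, "running as the helper subprocess")
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		// An initializer matched os.Args[0] and already ran.
		return
	}
	cmd := reexec.Command("storage-helper")
	cmd.Stderr = os.Stderr
	_ = cmd.Run()
}
```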
func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 9d937426854..5b3605f319c 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -9,10 +9,12 @@ import ( // Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } // CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index 673ab476abd..d868564767f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -17,6 +17,7 @@ func Self() string { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index c56671d9192..a1938cd4f34 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -7,7 +7,10 @@ import ( "path/filepath" ) -var registeredInitializers = make(map[string]func()) +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { @@ -22,6 +25,7 @@ func Register(name string, initializer func()) { // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true if exists { initializer() @@ -30,6 +34,21 @@ func Init() bool { return false } +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this. 
In + // order for that to work, main() should start with this + // boilerplate, or an equivalent: + // if reexec.Init() { + // return + // } + panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") + } +} + func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go index 10355848bdb..6b47c4e717f 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -13,6 +13,9 @@ const ( // Operation not supported EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP + + // Value is too small or too large for maximum size allowed + EOVERFLOW unix.Errno = unix.EOVERFLOW ) // Lgetxattr retrieves the value of the extended attribute identified by attr diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go index bc8b8e3a5fe..3fc27f0b139 100644 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -10,6 +10,9 @@ const ( // Operation not supported EOPNOTSUPP syscall.Errno = syscall.Errno(0) + + // Value is too small or too large for maximum size allowed + EOVERFLOW syscall.Errno = syscall.Errno(0) ) // Lgetxattr is not supported on platforms other than linux. diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index 6d351ce80a9..baeb8f1aab5 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package unshare @@ -9,6 +10,7 @@ import ( "io" "os" "os/exec" + "os/signal" "os/user" "runtime" "strconv" @@ -75,6 +77,28 @@ func getRootlessGID() int { return os.Getegid() } +// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the +// matching file capability +func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { + info, err := os.Stat(path) + if err != nil { + return false, err + } + + mode := info.Mode() + if mode&modeid == modeid { + return true, nil + } + cap, err := capability.NewFile2(path) + if err != nil { + return false, err + } + if err := cap.Load(); err != nil { + return false, err + } + return cap.Get(capability.EFFECTIVE, capid), nil +} + func (c *Cmd) Start() error { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -214,15 +238,26 @@ func (c *Cmd) Start() error { gidmapSet := false // Set the GID map. if c.UseNewgidmap { - cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newgidmap") + if err != nil { + return errors.Wrapf(err, "error finding newgidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
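Aside: IsSetID is exported, so callers outside of unshare can run the same diagnostic themselves. A sketch, assuming /usr/bin/newuidmap is where the binary lives on the target distro:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/unshare"
	"github.com/syndtr/gocapability/capability"
)

func main() {
	// Path is illustrative; newuidmap may live elsewhere.
	isSetuid, err := unshare.IsSetID("/usr/bin/newuidmap", os.ModeSetuid, capability.CAP_SETUID)
	if err != nil {
		fmt.Printf("cannot check newuidmap: %v\n", err)
		return
	}
	if !isSetuid {
		fmt.Println("newuidmap is neither setuid root nor carries CAP_SETUID")
	}
}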
g.Reset() cmd.Stdout = g cmd.Stderr = g - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { gidmapSet = true } else { logrus.Warnf("Error running newgidmap: %v: %s", err, g.String()) + isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) + if err != nil { + logrus.Warnf("Failed to check for setgid on %s: %v", path, err) + } else { + if !isSetgid { + logrus.Warnf("%s should be setgid or have filecaps setgid", path) + } + } logrus.Warnf("Falling back to single mapping") g.Reset() g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid()))) @@ -261,17 +296,29 @@ func (c *Cmd) Start() error { fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) } uidmapSet := false - // Set the GID map. + // Set the UID map. if c.UseNewuidmap { - cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) + path, err := exec.LookPath("newuidmap") + if err != nil { + return errors.Wrapf(err, "error finding newuidmap") + } + cmd := exec.Command(path, append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...) u.Reset() cmd.Stdout = u cmd.Stderr = u - err := cmd.Run() - if err == nil { + if err := cmd.Run(); err == nil { uidmapSet = true } else { logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) + isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) + if err != nil { + logrus.Warnf("Failed to check for setuid on %s: %v", path, err) + } else { + if !isSetuid { + logrus.Warnf("%s should be setuid or have filecaps setuid", path) + } + } + logrus.Warnf("Falling back to single mapping") u.Reset() u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid()))) @@ -484,6 +531,30 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) { // Finish up. logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) + + // Forward SIGHUP, SIGINT, and SIGTERM to our child process. + interrupted := make(chan os.Signal, 100) + defer func() { + signal.Stop(interrupted) + close(interrupted) + }() + cmd.Hook = func(int) error { + go func() { + for receivedSignal := range interrupted { + cmd.Cmd.Process.Signal(receivedSignal) + } + }() + return nil + } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + // Make sure our child process gets SIGKILLed if we exit, for whatever + // reason, before it does. + if cmd.Cmd.SysProcAttr == nil { + cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL + ExecRunnable(cmd, nil) } @@ -501,11 +572,11 @@ func ExecRunnable(cmd Runnable, cleanup func()) { if exitError.ProcessState.Exited() { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(waitStatus.ExitStatus()) } if waitStatus.Signaled() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(int(waitStatus.Signal()) + 128) } } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 722750c0cca..c17dd6d37ea 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,5 +1,14 @@ # This file is is the configuration file for all tools -# that use the containers/storage library. +# that use the containers/storage library. 
The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information # The "container storage" table contains all of the server options. [storage] @@ -11,8 +20,14 @@ driver = "overlay" runroot = "/run/containers/storage" # Primary Read/Write location of container storage +# When changing the graphroot location on an SELINUX system, you must +# ensure the labeling matches the default locations labels with the +# following commands: +# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH +# restorecon -R -v /NEWSTORAGEPATH graphroot = "/var/lib/containers/storage" + # Storage path for rootless users # # rootless_storage_path = "$HOME/.local/share/containers/storage" diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd new file mode 100644 index 00000000000..34d80152c02 --- /dev/null +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -0,0 +1,205 @@ +# This file is the configuration file for all tools +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/local/share/containers/storage.conf +# /usr/local/etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) +# See man 5 containers-storage.conf for more information +# The "container storage" table contains all of the server options. +[storage] + +# Default Storage Driver, Must be set for proper operation. +driver = "zfs" + +# Temporary storage location +runroot = "/var/run/containers/storage" + +# Primary Read/Write location of container storage +graphroot = "/var/db/containers/storage" + + +# Storage path for rootless users +# +# rootless_storage_path = "$HOME/.local/share/containers/storage" + +[storage.options] +# Storage options to be passed to underlying storage drivers + +# AdditionalImageStores is used to pass paths to additional Read/Only image stores +# Must be comma separated list. +additionalimagestores = [ +] + +# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of +# a container, to the UIDs/GIDs as they should appear outside of the container, +# and the length of the range of UIDs/GIDs. Additional mapped sets can be +# listed and will be heeded by libraries, but there are limits to the number of +# mappings which the kernel will allow when you later attempt to run a +# container. +# +# remap-uids = 0:1668442479:65536 +# remap-gids = 0:1668442479:65536 + +# Remap-User/Group is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting +# with an in-container ID of 0 and then a host-level ID taken from the lowest +# range that matches the specified name, and using the length of that range.
+# Additional ranges are then assigned, using the ranges which specify the +# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID, +# until all of the entries have been used for maps. +# +# remap-user = "containers" +# remap-group = "containers" + +# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID +# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned +# to containers configured to create automatically a user namespace. Containers +# configured to automatically create a user namespace can still overlap with containers +# having an explicit mapping set. +# This setting is ignored when running as rootless. +# root-auto-userns-user = "storage" +# +# Auto-userns-min-size is the minimum size for a user namespace created automatically. +# auto-userns-min-size=1024 +# +# Auto-userns-max-size is the maximum size for a user namespace created automatically. +# auto-userns-max-size=65536 + +[storage.options.overlay] +# ignore_chown_errors can be set to allow a non-privileged user running with +# a single UID within a user namespace to run containers. The user can pull +# and use any image even those with multiple uids. Note multiple UIDs will be +# squashed down to the default uid in the container. These images will have no +# separation between the users in the container. Only supported for the overlay +# and vfs drivers. +#ignore_chown_errors = "false" + +# Inodes is used to set a maximum inodes of the container image. +# inodes = "" + +# Path to a helper program to use for mounting the file system instead of mounting it +# directly. +#mount_program = "/usr/bin/fuse-overlayfs" + +# mountopt specifies comma separated list of extra mount options +mountopt = "nodev" + +# Set to skip a PRIVATE bind mount on the storage home directory. +# skip_mount_home = "false" + +# Size is used to set a maximum size of the container image. +# size = "" + +# ForceMask specifies the permissions mask that is used for new files and +# directories. +# +# The values "shared" and "private" are accepted. +# Octal permission masks are also accepted. +# +# "": No value specified. +# All files/directories, get set with the permissions identified within the +# image. +# "private": it is equivalent to 0700. +# All files/directories get set with 0700 permissions. The owner has rwx +# access to the files. No other users on the system can access the files. +# This setting could be used with network-based homedirs. +# "shared": it is equivalent to 0755. +# The owner has rwx access to the files and everyone else can read, access +# and execute them. This setting is useful for sharing containers storage +# with other users. For instance have a storage owned by root but shared +# to rootless users as an additional store. +# NOTE: All files within the image are made readable and executable by any +# user on the system. Even /etc/shadow within your image is now readable by +# any user. +# +# OCTAL: Users can experiment with other OCTAL Permissions. +# +# Note: The force_mask Flag is an experimental feature, it could change in the +# future. When "force_mask" is set the original permission mask is stored in +# the "user.containers.override_stat" xattr and the "mount_program" option must +# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the +# extended attribute permissions to processes within containers rather than the +# "force_mask" permissions.
+# +# force_mask = "" + +[storage.options.thinpool] +# Storage Options for thinpool + +# autoextend_percent determines the amount by which pool needs to be +# grown. This is specified in terms of % of pool size. So a value of 20 means +# that when threshold is hit, pool will be grown by 20% of existing +# pool size. +# autoextend_percent = "20" + +# autoextend_threshold determines the pool extension threshold in terms +# of percentage of pool size. For example, if threshold is 60, that means when +# pool is 60% full, threshold has been hit. +# autoextend_threshold = "80" + +# basesize specifies the size to use when creating the base device, which +# limits the size of images and containers. +# basesize = "10G" + +# blocksize specifies a custom blocksize to use for the thin pool. +# blocksize="64k" + +# directlvm_device specifies a custom block storage device to use for the +# thin pool. Required if you set up devicemapper. +# directlvm_device = "" + +# directlvm_device_force wipes device even if device already has a filesystem. +# directlvm_device_force = "True" + +# fs specifies the filesystem type to use for the base device. +# fs="xfs" + +# log_level sets the log level of devicemapper. +# 0: LogLevelSuppress 0 (Default) +# 2: LogLevelFatal +# 3: LogLevelErr +# 4: LogLevelWarn +# 5: LogLevelNotice +# 6: LogLevelInfo +# 7: LogLevelDebug +# log_level = "7" + +# min_free_space specifies the min free space percent in a thin pool required for +# new device creation to succeed. Valid values are from 0% - 99%. +# Value 0% disables +# min_free_space = "10%" + +# mkfsarg specifies extra mkfs arguments to be used when creating the base +# device. +# mkfsarg = "" + +# metadata_size is used to set the `pvcreate --metadatasize` options when +# creating thin devices. Default is 128k +# metadata_size = "" + +# Size is used to set a maximum size of the container image. +# size = "" + +# use_deferred_removal marks devicemapper block device for deferred removal. +# If the thinpool is in use when the driver attempts to remove it, the driver +# tells the kernel to remove it as soon as possible. Note this does not free +# up the disk space, use deferred deletion to fully remove the thinpool. +# use_deferred_removal = "True" + +# use_deferred_deletion marks thinpool device for deferred deletion. +# If the device is busy when the driver attempts to delete it, the driver +# will attempt to delete device every 30 seconds until successful. +# If the program using the driver exits, the driver will continue attempting +# to cleanup the next time the driver is used. Deferred deletion permanently +# deletes the device and all data stored in device will be lost. +# use_deferred_deletion = "True" + +# xfs_nospace_max_retries specifies the maximum number of retries XFS should +# attempt to complete IO when ENOSPC (no space) error is returned by +# underlying storage device. +# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 169c7d1513e..45912d0ca59 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -31,6 +31,14 @@ import ( "github.com/pkg/errors" ) +type updateNameOperation int + +const ( + setNames updateNameOperation = iota + addNames + removeNames +) + var ( stores []*store storesLock sync.Mutex @@ -368,8 +376,17 @@ type Store interface { // SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + AddNames(id string, names []string) error + + // RemoveNames removes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + RemoveNames(id string, names []string) error + // ListImageBigData retrieves a list of the (possibly large) chunks of // named data associated with an image. ListImageBigData(id string) ([]string, error) @@ -575,10 +592,11 @@ type ContainerOptions struct { // container's layer will inherit settings from the image's top layer // or, if it is not being created based on an image, the Store object. types.IDMappingOptions - LabelOpts []string - Flags map[string]interface{} - MountOpts []string - Volatile bool + LabelOpts []string + Flags map[string]interface{} + MountOpts []string + Volatile bool + StorageOpt map[string]string } type store struct { @@ -646,17 +664,21 @@ func GetStore(options types.StoreOptions) (Store, error) { storesLock.Lock() defer storesLock.Unlock() + // return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first for _, s := range stores { - if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { return s, nil } } - if options.GraphRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified") - } - if options.RunRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified") + // if passed a run-root or graph-root alone, the other should be defaulted; only error if we have neither. + switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + case options.GraphRoot == "": + options.GraphRoot = types.Options().GraphRoot + case options.RunRoot == "": + options.RunRoot = types.Options().RunRoot } if err := os.MkdirAll(options.RunRoot, 0700); err != nil { @@ -1173,6 +1195,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea if layer == nil { layer = cLayer parentLayer = cParentLayer + if store != rlstore { + // The layer is in another store, so we cannot + // create a mapped version of it to the image.
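Taken together, the GetStore defaulting and the new name helpers change typical caller code roughly as follows; a sketch, where the ID and the image names are hypothetical:

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// RunRoot deliberately left empty; GetStore now fills it from types.Options().
	opts := types.StoreOptions{GraphRoot: "/var/lib/containers/storage"}
	s, err := storage.GetStore(opts)
	if err != nil {
		fmt.Println(err)
		return
	}
	id := "example" // hypothetical image or container ID
	// Prefer the race-free helpers over the deprecated SetNames:
	if err := s.AddNames(id, []string{"localhost/demo:latest"}); err != nil {
		fmt.Println(err)
	}
	if err := s.RemoveNames(id, []string{"localhost/demo:old"}); err != nil {
		fmt.Println(err)
	}
}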
+ createMappedLayer = false + } } } } @@ -1384,7 +1411,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags["MountLabel"] = mountLabel } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true) + clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true) if err != nil { return nil, err } @@ -1608,7 +1635,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { } } if foundImage { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id) + return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id) } return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) } @@ -2045,7 +2072,20 @@ func dedupeNames(names []string) []string { return deduped } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeNames(names) rlstore, err := s.LayerStore() @@ -2058,7 +2098,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rlstore.Exists(id) { - return rlstore.SetNames(id, deduped) + switch op { + case setNames: + return rlstore.SetNames(id, deduped) + case removeNames: + return rlstore.RemoveNames(id, deduped) + case addNames: + return rlstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } ristore, err := s.ImageStore() @@ -2071,7 +2120,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if ristore.Exists(id) { - return ristore.SetNames(id, deduped) + switch op { + case setNames: + return ristore.SetNames(id, deduped) + case removeNames: + return ristore.RemoveNames(id, deduped) + case addNames: + return ristore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } // Check is id refers to a RO Store @@ -2109,7 +2167,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rcstore.Exists(id) { - return rcstore.SetNames(id, deduped) + switch op { + case setNames: + return rcstore.SetNames(id, deduped) + case removeNames: + return rcstore.RemoveNames(id, deduped) + case addNames: + return rcstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } return ErrLayerUnknown } @@ -2370,22 +2437,16 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err != nil { return nil, err } - childrenByParent := make(map[string]*[]string) + childrenByParent := make(map[string][]string) for _, layer := range layers { - parent := layer.Parent - if list, ok := childrenByParent[parent]; ok { - newList := append(*list, layer.ID) - childrenByParent[parent] = &newList - } else { - childrenByParent[parent] = &([]string{layer.ID}) - } + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) } - otherImagesByTopLayer := make(map[string]string) + otherImagesTopLayers := 
make(map[string]struct{}) for _, img := range images { if img.ID != id { - otherImagesByTopLayer[img.TopLayer] = img.ID + otherImagesTopLayers[img.TopLayer] = struct{}{} for _, layerID := range img.MappedTopLayers { - otherImagesByTopLayer[layerID] = img.ID + otherImagesTopLayers[layerID] = struct{}{} } } } @@ -2395,43 +2456,44 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } } layer := image.TopLayer - lastRemoved := "" + layersToRemoveMap := make(map[string]struct{}) + layersToRemove = append(layersToRemove, image.MappedTopLayers...) + for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } for layer != "" { if rcstore.Exists(layer) { break } - if _, ok := otherImagesByTopLayer[layer]; ok { + if _, used := otherImagesTopLayers[layer]; used { break } parent := "" if l, err := rlstore.Get(layer); err == nil { parent = l.Parent } - hasOtherRefs := func() bool { + hasChildrenNotBeingRemoved := func() bool { layersToCheck := []string{layer} if layer == image.TopLayer { layersToCheck = append(layersToCheck, image.MappedTopLayers...) } for _, layer := range layersToCheck { - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - return true + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue } + return true } } } return false } - if hasOtherRefs() { + if hasChildrenNotBeingRemoved() { break } - lastRemoved = layer - if layer == image.TopLayer { - layersToRemove = append(layersToRemove, image.MappedTopLayers...) - } - layersToRemove = append(layersToRemove, lastRemoved) + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} layer = parent } } else { @@ -2499,23 +2561,29 @@ func (s *store) DeleteContainer(id string) error { gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) wg.Add(1) go func() { - var err error - for attempts := 0; attempts < 50; attempts++ { - err = os.RemoveAll(gcpath) - if err == nil || !system.IsEBUSY(err) { - break - } - time.Sleep(time.Millisecond * 100) + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return } - errChan <- err - wg.Done() + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) }() rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) wg.Add(1) go func() { - errChan <- os.RemoveAll(rcpath) - wg.Done() + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) }() go func() { @@ -2524,17 +2592,12 @@ func (s *store) DeleteContainer(id string) error { }() var errors []error - for { - select { - case err, ok := <-errChan: - if !ok { - return multierror.Append(nil, errors...).ErrorOrNil() - } - if err != nil { - errors = append(errors, err) - } + for err := range errChan { + if err != nil { + errors = append(errors, err) } } + return multierror.Append(nil, errors...).ErrorOrNil() } } return ErrNotAContainer @@ -2830,10 +2893,33 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro if err != nil { return nil, err } + + // NaiveDiff could cause mounts to happen 
without a lock, so be safe + // and treat the .Diff operation as a Mount. + s.graphLock.Lock() + defer s.graphLock.Unlock() + + modified, err := s.graphLock.Modified() + if err != nil { + return nil, err + } + + // We need to make sure the home mount is present when the Mount is done. + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return nil, err + } + s.lastLoaded = time.Now() + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) { store := s store.RLock() if err := store.ReloadIfChanged(); err != nil { + store.Unlock() return nil, err } if store.Exists(to) { diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index d920d12eb50..ad12ffdbf2d 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -55,4 +55,6 @@ var ( ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = errors.New("not supported") + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = errors.New("invalid mappings specified") ) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index f9bf7e6b658..d318421a44b 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -3,44 +3,57 @@ package types import ( "fmt" "os" - "os/exec" "path/filepath" "strings" "sync" "time" "github.com/BurntSushi/toml" - "github.com/containers/storage/drivers/overlay" cfg "github.com/containers/storage/pkg/config" "github.com/containers/storage/pkg/idtools" "github.com/sirupsen/logrus" ) // TOML-friendly explicit tables used for conversions. -type tomlConfig struct { +type TomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - RootlessStoragePath string `toml:"rootless_storage_path"` - Options cfg.OptionsConfig `toml:"options"` + Driver string `toml:"driver,omitempty"` + RunRoot string `toml:"runroot,omitempty"` + GraphRoot string `toml:"graphroot,omitempty"` + RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + Options cfg.OptionsConfig `toml:"options,omitempty"` } `toml:"storage"` } -// defaultConfigFile path to the system wide storage.conf file -var ( - defaultConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false - // DefaultStoreOptions is a reasonable default set of options. - defaultStoreOptions StoreOptions +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" ) func init() { - defaultStoreOptions.RunRoot = "/run/containers/storage" - defaultStoreOptions.GraphRoot = "/var/lib/containers/storage" + defaultStoreOptions.RunRoot = defaultRunRoot + defaultStoreOptions.GraphRoot = defaultGraphRoot defaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + // The DefaultConfigFile(rootless) function returns the path + // of the used storage.conf file, by returning defaultConfigFile. + // If the override file exists, containers/storage uses it by default.
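Callers that do not want this implicit precedence can point the loader at one file explicitly; a sketch using the exported ReloadConfigurationFile from this package:

package main

import "github.com/containers/storage/types"

func main() {
	var sysOpts types.StoreOptions
	// Overwrites sysOpts with whatever the named file contains.
	types.ReloadConfigurationFile("/etc/containers/storage.conf", &sysOpts)
}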
+ defaultConfigFile = defaultOverrideConfigFile + ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions) + } else { + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) + } + ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + } + // reload could set values to empty for run and graph root if config does not contain anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } } // defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. @@ -168,7 +181,6 @@ func isRootlessDriver(driver string) bool { // getRootlessStorageOpts returns the storage opts for containers running as non root func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - const overlayDriver = "overlay" dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) if err != nil { @@ -190,25 +202,16 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { opts.GraphDriverName = driver } - if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver { - supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime) - if err != nil { - return opts, err - } - if supported { - opts.GraphDriverName = overlayDriver - } else { - if path, err := exec.LookPath("fuse-overlayfs"); err == nil { - opts.GraphDriverName = overlayDriver - opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} - } - } - if opts.GraphDriverName == overlayDriver { - for _, o := range systemOpts.GraphDriverOptions { - if strings.Contains(o, "ignore_chown_errors") { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) - break - } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + opts.GraphDriverName = overlayDriver + } + + if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break } } } @@ -271,7 +274,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions.
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { - config := new(tomlConfig) + config := new(TomlConfig) meta, err := toml.DecodeFile(configFile, &config) if err == nil { @@ -286,7 +289,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } } - // Clear storeOptions of previos settings + // Clear storeOptions of previous settings *storeOptions = StoreOptions{} if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver @@ -295,6 +298,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { config.Storage.Driver = os.Getenv("STORAGE_DRIVER") storeOptions.GraphDriverName = config.Storage.Driver } + if storeOptions.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + storeOptions.GraphDriverName = overlayDriver + } if storeOptions.GraphDriverName == "" { logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) } @@ -385,3 +392,39 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { func Options() StoreOptions { return defaultStoreOptions } + +// Save overwrites the TomlConfig in storage.conf with the given conf +func Save(conf TomlConfig, rootless bool) error { + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return err + } + + if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { + return err + } + + f, err := os.Create(configFile) + if err != nil { + return err + } + + return toml.NewEncoder(f).Encode(conf) +} + +// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it +func StorageConfig(rootless bool) (*TomlConfig, error) { + config := new(TomlConfig) + + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return nil, err + } + + _, err = toml.DecodeFile(configFile, &config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go new file mode 100644 index 00000000000..d5ad50bc0bd --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_darwin.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are the default paths for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options.
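With TomlConfig now exported, the StorageConfig and Save helpers added above combine into a read-modify-write cycle for storage.conf; a sketch for the rootless case:

package main

import (
	"fmt"

	"github.com/containers/storage/types"
)

func main() {
	conf, err := types.StorageConfig(true) // decode the rootless storage.conf
	if err != nil {
		fmt.Println(err)
		return
	}
	conf.Storage.Driver = "overlay" // adjust a field
	if err := types.Save(*conf, true); err != nil { // re-encode to the same path
		fmt.Println(err)
	}
}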
+ defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go new file mode 100644 index 00000000000..d5976b6d581 --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_freebsd.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are the default paths for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/var/run/containers/storage" + defaultGraphRoot string = "/var/db/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/local/share/containers/storage.conf" + defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go new file mode 100644 index 00000000000..d5ad50bc0bd --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_linux.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are the default paths for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options. + defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go new file mode 100644 index 00000000000..d5ad50bc0bd --- /dev/null +++ b/vendor/github.com/containers/storage/types/options_windows.go @@ -0,0 +1,17 @@ +package types + +const ( + // these are the default paths for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + +// defaultConfigFile path to the system wide storage.conf file +var ( + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false + // DefaultStoreOptions is a reasonable default set of options.
+ defaultStoreOptions StoreOptions +) diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 80d56041b06..37d4b79b01b 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -40,3 +40,35 @@ func validateMountOptions(mountOptions []string) error { } return nil } + +func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { + var result []string + switch op { + case setNames: + // ignore all old names and just return new names + result = opParameters + case removeNames: + // remove given names from old names + result = make([]string, 0, len(oldNames)) + for _, name := range oldNames { + // only keep names in final result which do not intersect with input names + // basically `result = oldNames - opParameters` + nameShouldBeRemoved := false + for _, opName := range opParameters { + if name == opName { + nameShouldBeRemoved = true + } + } + if !nameShouldBeRemoved { + result = append(result, name) + } + } + case addNames: + result = make([]string, 0, len(opParameters)+len(oldNames)) + result = append(result, opParameters...) + result = append(result, oldNames...) + default: + return result, errInvalidUpdateNameOperation + } + return dedupeNames(result), nil +} diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index bada4a8e3c8..b6bca4cef11 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5755,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -8607,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. 
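applyNameOperation is unexported, so only code inside package storage can call it; a sketch of the semantics implied by the implementation above, with made-up names:

// inside package storage:
old := []string{"registry/app:v1", "registry/app:v2"}
kept, _ := applyNameOperation(old, []string{"registry/app:v2"}, removeNames)
// kept == []string{"registry/app:v1"}
added, _ := applyNameOperation(old, []string{"registry/app:v3"}, addNames)
// added holds all three names, with duplicates removed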
operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go new file mode 100644 index 00000000000..fa15c24b982 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/address_pools.go @@ -0,0 +1,84 @@ +package opts + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "strconv" + "strings" + + types "github.com/docker/libnetwork/ipamutils" +) + +// PoolsOpt is a Value type for parsing the default address pools definitions +type PoolsOpt struct { + Values []*types.NetworkToSplit +} + +// UnmarshalJSON fills values structure info from JSON input +func (p *PoolsOpt) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &(p.Values)) +} + +// Set predefined pools +func (p *PoolsOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + poolsDef := types.NetworkToSplit{} + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case "base": + poolsDef.Base = value + case "size": + size, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err) + } + poolsDef.Size = size + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + p.Values = append(p.Values, &poolsDef) + + return nil +} + +// Type returns the type of this option +func (p *PoolsOpt) Type() string { + return "pool-options" +} + +// String returns a string repr of this option +func (p *PoolsOpt) String() string { + var pools []string + for _, pool := range p.Values { + repr := fmt.Sprintf("%s %d", pool.Base, pool.Size) + pools = append(pools, repr) + } + return strings.Join(pools, ", ") +} + +// Value returns the pools +func (p *PoolsOpt) Value() []*types.NetworkToSplit { + return p.Values +} + +// Name returns the flag name of this option +func (p *PoolsOpt) Name() string { + return "default-address-pools" +} diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go new file mode 100644 index 00000000000..97e1a8c8a26 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/env.go @@ -0,0 +1,30 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "os" + "strings" + + "github.com/pkg/errors" +) + +// ValidateEnv validates an environment variable and returns it. +// If no value is specified, it obtains its value from the current environment +// +// As with ParseEnvFile and related to #16585, environment variable names +// are not validated whatsoever; it's up to the application inside docker +// to validate them or not.
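The value format PoolsOpt.Set accepts is a CSV list of key=value pairs with the keys base and size; a sketch with an arbitrary pool definition:

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	var pools opts.PoolsOpt
	if err := pools.Set("base=10.10.0.0/16,size=24"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(pools.String()) // "10.10.0.0/16 24"
	fmt.Println(pools.Type())   // "pool-options"
}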
+// +// The only validation here is to check if name is empty, per #25099 +func ValidateEnv(val string) (string, error) { + arr := strings.SplitN(val, "=", 2) + if arr[0] == "" { + return "", errors.New("invalid environment variable: " + val) + } + if len(arr) > 1 { + return val, nil + } + if envVal, ok := os.LookupEnv(arr[0]); ok { + return arr[0] + "=" + envVal, nil + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go new file mode 100644 index 00000000000..a3123adefe7 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,183 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "net" + "net/url" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/homedir" +) + +const ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = "tcp://" + DefaultHTTPHost + ":2375" + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = "tcp://" + DefaultHTTPHost + ":2376" + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` + // HostGatewayName is the string value that can be passed + // to the IPAddr section in --add-host that is replaced by + // the value of HostGatewayIP daemon config value + HostGatewayName = "host-gateway" +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDaemonHost + if host != "" { + _, err := parseDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for TLS + return val, nil +} + +// ParseHost parses and sets defaults for a daemon host string. +// defaultToTLS is preferred over defaultToUnixXDG. +func ParseHost(defaultToTLS, defaultToUnixXDG bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else if defaultToUnixXDG { + runtimeDir, err := homedir.GetRuntimeDir() + if err != nil { + return "", err + } + socket := filepath.Join(runtimeDir, "docker.sock") + host = "unix://" + socket + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. +func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). 
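A sketch of how ParseTCPAddr completes partial input from the defaults; on non-Windows builds DefaultHTTPHost is "localhost" (see hosts_unix.go below):

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Port-only input is completed from the default host.
	addr, err := opts.ParseTCPAddr("tcp://:8080", opts.DefaultTCPHost)
	fmt.Println(addr, err) // "tcp://localhost:8080" <nil>

	_, err = opts.ParseTCPAddr("unix:///var/run/docker.sock", opts.DefaultTCPHost)
	fmt.Println(err) // non-nil: only tcp addresses are accepted here
}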
+func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + // Skip IPaddr validation for special "host-gateway" string + if arr[1] != HostGatewayName { + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 00000000000..29864194acb --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package opts // import "github.com/docker/docker/opts" + +const ( + // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 + DefaultHTTPHost = "localhost" + + // DefaultHost constant defines the default host string used by docker on other hosts than Windows + DefaultHost = "unix://" + DefaultUnixSocket +) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 00000000000..576236ba42b --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,60 @@ +package opts // import "github.com/docker/docker/opts" + +const ( + // TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. + // + // On Windows, this mitigates a problem with the default options of running + // a docker client against a local docker daemon on TP5. + // + // What was found that if the default host is "localhost", even if the client + // (and daemon as this is local) is not physically on a network, and the DNS + // cache is flushed (ipconfig /flushdns), then the client will pause for + // exactly one second when connecting to the daemon for calls. For example + // using docker run windowsservercore cmd, the CLI will send a create followed + // by an attach. You see the delay between the attach finishing and the attach + // being seen by the daemon. + // + // Here's some daemon debug logs with additional debug spew put in. The + // AfterWriteJSON log is the very last thing the daemon does as part of the + // create call. The POST /attach is the second CLI call. Notice the second + // time gap. + // + // time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" + // time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" + // time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." + // time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... + // time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." + // time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." + // time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" + // time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" + // time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" + // time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" + // time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" + // ... 1 second gap here.... 
+ // time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" + // time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" + // + // We suspect this is either a bug introduced in GOLang 1.5.1, or that a change + // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, + // the Windows networking stack is supposed to resolve "localhost" internally, + // without hitting DNS, or even reading the hosts file (which is why localhost + // is commented out in the hosts file on Windows). + // + // We have validated that working around this using the actual IPv4 localhost + // address does not cause the delay. + // + // This does not occur with the docker client built with 1.4.3 on the same + // Windows build, regardless of whether the daemon is built using 1.5.1 + // or 1.4.3. It does not occur on Linux. We also verified we see the same thing + // on a cross-compiled Windows binary (from Linux). + // + // Final note: This is a mitigation, not a 'real' fix. It is still susceptible + // to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' + // explicitly. + + // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 + DefaultHTTPHost = "127.0.0.1" + + // DefaultHost constant defines the default host string used by docker on Windows + DefaultHost = "npipe://" + DefaultNamedPipe +) diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go new file mode 100644 index 00000000000..cfbff3a9fd6 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -0,0 +1,47 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parsable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. 
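IPOpt usage, sketched; the flag value overwrites the referenced net.IP in place:

package main

import (
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	var ip net.IP
	flagVal := opts.NewIPOpt(&ip, "127.0.0.1") // seeds ip with the default
	if err := flagVal.Set("::1"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(flagVal.String()) // "::1"
}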
+func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go new file mode 100644 index 00000000000..60a093f28c9 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -0,0 +1,348 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "net" + "path" + "regexp" + "strings" + + units "github.com/docker/go-units" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + if len(*opts.values) == 0 { + return "" + } + return fmt.Sprintf("%v", *opts.values) +} + +// Set validates the input value if needed and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + *opts.values = append(*opts.values, value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return *opts.values +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the number of elements in the slice. +func (opts *ListOpts) Len() int { + return len(*opts.values) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// WithValidator returns the ListOpts with validator set. +func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { + opts.validator = validator + return opts +} + +// NamedOption is an interface that list and map options +// with names implement. +type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name.
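ListOpts pairs naturally with the validators defined later in this file; a sketch using ValidateIPAddress:

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	dns := opts.NewListOpts(opts.ValidateIPAddress)
	_ = dns.Set("192.168.0.2")  // accepted
	err := dns.Set("not-an-ip") // rejected by the validator
	fmt.Println(dns.GetAll(), dns.Len(), err)
}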
+// This struct is useful to keep a reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates the input value if needed and adds it to the +// internal map, splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", opts.values) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep a reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an IP address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateDNSSearch validates a domain for the resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "."
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label +// and does not use the reserved namespaces com.docker.*, io.docker.*, org.dockerproject.*, +// and returns it. +// Labels are in the form key=value. +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + + lowered := strings.ToLower(val) + if strings.HasPrefix(lowered, "com.docker.") || strings.HasPrefix(lowered, "io.docker.") || + strings.HasPrefix(lowered, "org.dockerproject.") { + return "", fmt.Errorf( + "label %s is not allowed: the namespaces com.docker.*, io.docker.*, and org.dockerproject.* are reserved for internal use", + val) + } + + return val, nil +} + +// ValidateSingleGenericResource validates that a single entry in the +// generic resource list is valid. +// e.g. 'GPU=UID1' is valid, while 'GPU:UID1' or 'UID1' is not +func ValidateSingleGenericResource(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val) + } + return val, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) +type MemBytes int64 + +// String returns the string format of the human readable memory bytes +func (m *MemBytes) String() string { + // NOTE: In spf13/pflag/flag.go, "0" is considered the "zero value" while "0 B" is not. + // We return "0" in case the value is 0 here so that the default value is hidden.
+ // (Sometimes "default 0 B" is actually misleading) + if m.Value() != 0 { + return units.BytesSize(float64(m.Value())) + } + return "0" +} + +// Set sets the value of the MemBytes by passing a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) + return err +} + +// Type returns the type +func (m *MemBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemBytes) Value() int64 { + return int64(*m) +} + +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalJSON(s []byte) error { + if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { + return fmt.Errorf("invalid size: %q", s) + } + val, err := units.RAMInBytes(string(s[1 : len(s)-1])) + *m = MemBytes(val) + return err +} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go new file mode 100644 index 00000000000..6c889070e82 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts // import "github.com/docker/docker/opts" + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. +type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return *s.value +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go new file mode 100644 index 00000000000..4b9babf0a5c --- /dev/null +++ b/vendor/github.com/docker/docker/opts/runtime.go @@ -0,0 +1,79 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the RuntimeOpt in the configuration.
+func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. +func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go new file mode 100644 index 00000000000..61cc58d4d32 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ulimit.go @@ -0,0 +1,81 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + + units "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. 
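For review context: `RuntimeOpt.Set` above parses `name=path` pairs, and `UlimitOpt` parses `name=soft[:hard]` entries via `units.ParseUlimit`. A minimal sketch of the ulimit side, assuming go-units' `Ulimit` fields (`Name`, `Soft`, `Hard`); the limit values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Passing nil lets the option allocate its own backing map.
	ul := opts.NewUlimitOpt(nil)
	if err := ul.Set("nofile=1024:2048"); err != nil {
		panic(err)
	}
	for _, u := range ul.GetList() {
		fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048
	}
}
```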
+func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index bb7e4e33695..296c96a6334 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -8,11 +8,6 @@ import ( "strings" ) -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - // PortBinding represents a binding between a Host IP address and a Host Port type PortBinding struct { // HostIP is the host IP Address @@ -158,30 +153,33 @@ type PortMapping struct { func splitParts(rawport string) (string, string, string) { parts := strings.Split(rawport, ":") n := len(parts) - containerport := parts[n-1] + containerPort := parts[n-1] switch n { case 1: - return "", "", containerport + return "", "", containerPort case 2: - return "", parts[0], containerport + return "", parts[0], containerPort case 3: - return parts[0], parts[1], containerport + return parts[0], parts[1], containerPort default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort } } // ParsePortSpec parses a port specification string into a slice of PortMappings func ParsePortSpec(rawPort string) ([]PortMapping, error) { var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) + ip, hostPort, containerPort := splitParts(rawPort) proto, containerPort = SplitProtoPort(containerPort) - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + if ip != "" && ip[0] == '[' { + // Strip [] from IPV6 addresses + rawIP, _, err := net.SplitHostPort(ip + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + } + ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { return nil, fmt.Errorf("Invalid ip address: %s", ip) diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go index ce950171e31..b6eed145e1c 100644 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -43,7 +43,7 @@ type portMapSorter []portMapEntry func (s portMapSorter) Len() int { return len(s) } func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// sort the port so that the order is: +// Less sorts the port so that the order is: // 1. port with larger specified bindings // 2. larger port // 3. 
port with tcp protocol @@ -58,7 +58,7 @@ func (s portMapSorter) Less(i, j int) bool { func SortPortMap(ports []Port, bindings PortMap) { s := portMapSorter{} for _, p := range ports { - if binding, ok := bindings[p]; ok { + if binding, ok := bindings[p]; ok && len(binding) > 0 { for _, b := range binding { s = append(s, portMapEntry{port: p, binding: b}) } diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go index 98e9a1dc61b..c897cb02ade 100644 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -2,11 +2,8 @@ package sockets import ( "net" - "net/url" "os" "strings" - - "golang.org/x/net/proxy" ) // GetProxyEnv allows access to the uppercase and the lowercase forms of @@ -20,32 +17,12 @@ func GetProxyEnv(key string) string { return proxyValue } -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil +// DialerFromEnvironment was previously used to configure a net.Dialer to route +// connections through a SOCKS proxy. +// DEPRECATED: SOCKS proxies are now supported by configuring only +// http.Transport.Proxy, and no longer require changing http.Transport.Dial. +// Therefore, only sockets.ConfigureTransport() needs to be called, and any +// sockets.DialerFromEnvironment() calls can be dropped. +func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { + return direct, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index a1d7beb4d80..2e9e9006f61 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -3,14 +3,9 @@ package sockets import ( "errors" - "net" "net/http" - "time" ) -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. 
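For review context: per the deprecation note above, callers that previously combined `DialerFromEnvironment` with a custom `Dial` now only need `ConfigureTransport`, with proxy handling coming from `http.ProxyFromEnvironment`. A hedged sketch of a migrated call site (the socket path is illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For unix/npipe this installs a DialContext; for TCP it now only
	// sets tr.Proxy (see the sockets.go hunk below).
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // use the client as usual
}
```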
var ErrProtocolNotAvailable = errors.New("protocol not available") @@ -26,13 +21,6 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 386cf0dbbde..10d76342652 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,6 +3,7 @@ package sockets import ( + "context" "fmt" "net" "net/http" @@ -10,7 +11,10 @@ import ( "time" ) -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +const ( + defaultTimeout = 10 * time.Second + maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { @@ -18,8 +22,11 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { } // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) + dialer := &net.Dialer{ + Timeout: defaultTimeout, + } + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, proto, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 5c21644e1fe..7acafc5a2ad 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -1,6 +1,7 @@ package sockets import ( + "context" "net" "net/http" "time" @@ -15,8 +16,8 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { func configureNpipeTransport(tr *http.Transport, proto, addr string) error { // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index a8b5dbb6fdc..e7591e6edbf 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,51 @@ // +build !windows +/* +Package sockets is a simple unix domain socket wrapper. 
+ +Usage + +For example: + + import( + "fmt" + "net" + "os" + "github.com/docker/go-connections/sockets" + ) + + func main() { + l, err := sockets.NewUnixSocketWithOpts("/path/to/sockets", + sockets.WithChown(0,0),sockets.WithChmod(0660)) + if err != nil { + panic(err) + } + echoStr := "hello" + + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte(echoStr)) + conn.Close() + } + }() + + conn, err := net.Dial("unix", "/path/to/sockets") + if err != nil { + panic(err) + } + + buf := make([]byte, 5) + if _, err := conn.Read(buf); err != nil { + panic(err) + } else if string(buf) != echoStr { + panic(fmt.Errorf("message may be lost")) + } + } +*/ package sockets import ( @@ -8,25 +54,73 @@ import ( "syscall" ) -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { +// SockOption configures how the socket file is created +type SockOption func(string) error + +// WithChown modifies the socket file's uid and gid +func WithChown(uid, gid int) SockOption { + return func(path string) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + return nil + } +} + +// WithChmod modifies the socket file's access mode. +func WithChmod(mask os.FileMode) SockOption { + return func(path string) error { + if err := os.Chmod(path, mask); err != nil { + return err + } + return nil + } +} + +// NewUnixSocketWithOpts creates a unix socket with the specified options. +// By default, socket permissions are 0000 (i.e.: no access for anyone); pass +// WithChmod() and WithChown() to set the desired ownership and permissions. +// +// This function temporarily changes the system's "umask" to 0777 to work around +// a race condition between creating the socket and setting its permissions. While +// this should only be for a short duration, it may affect other processes that +// create files/directories during that period. +func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) + // net.Listen does not allow for permissions to be set. As a result, when + // specifying custom permissions ("WithChmod()"), there is a short time + // between creating the socket and applying the permissions, during which + // the socket permissions are less restrictive than desired. + // + // To work around this limitation of net.Listen(), we temporarily set the + // umask to 0777, which forces the socket to be created with 000 permissions + // (i.e.: no access for anyone). After that, WithChmod() must be used to set + // the desired permissions. + // + // We don't use "defer" here, to reset the umask to its original value as soon + // as possible. Ideally we'd be able to detect if WithChmod() was passed as + // an option, and skip changing umask if default permissions are used. + origUmask := syscall.Umask(0777) l, err := net.Listen("unix", path) + syscall.Umask(origUmask) if err != nil { return nil, err } - if err := os.Chown(path, 0, gid); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err + + for _, op := range opts { + if err := op(path); err != nil { + _ = l.Close() + return nil, err + } } + return l, nil } + +// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 0ef3fdcb469..992968373eb 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -53,18 +53,9 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, @@ -72,25 +63,25 @@ func ServerDefault(ops ...func(*tls.Config)) *tls.Config { } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
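For review context: a short usage sketch of the options API introduced above; the path, group ID, and mode are illustrative, and `NewUnixSocket` remains as the fixed-permission wrapper:

```go
package main

import (
	"log"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// Ownership and mode are applied after the listener is created with
	// 0000 permissions, per the umask workaround documented above.
	l, err := sockets.NewUnixSocketWithOpts("/run/example.sock",
		sockets.WithChown(0, 999),
		sockets.WithChmod(0660),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
}
```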
@@ -108,11 +99,11 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pem, err := ioutil.ReadFile(caFile) + pemData, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pem) { + if !certPool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } return certPool, nil @@ -141,7 +132,7 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key +// password when trying to decrypt a TLS private key func IsErrEncryptedKey(err error) bool { return errors.Cause(err) == x509.IncorrectPasswordError } @@ -157,8 +148,8 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { } var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go new file mode 100644 index 00000000000..d8215f8e78a --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go @@ -0,0 +1,16 @@ +// +build go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go new file mode 100644 index 00000000000..a5ba7f4a388 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go @@ -0,0 +1,15 @@ +// +build !go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. 
+var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} diff --git a/vendor/github.com/docker/go-events/.gitignore b/vendor/github.com/docker/go-events/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/docker/go-events/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 00000000000..d813af779b0 --- /dev/null +++ b/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 00000000000..6d630cf595e --- /dev/null +++ b/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 00000000000..e414d82e964 --- /dev/null +++ b/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "aluzzardi", + "lk4d4", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md new file mode 100644 index 00000000000..0acafc279a3 --- /dev/null +++ b/vendor/github.com/docker/go-events/README.md @@ -0,0 +1,117 @@ +# Docker Events Package + +[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) +[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) + +The Docker `events` package implements a composable event distribution package +for Go. + +Originally created to implement the [notifications in Docker Registry +2](https://github.com/docker/distribution/blob/master/docs/notifications.md), +we've found the pattern to be useful in other applications. This package is +most of the same code with slightly updated interfaces. Much of the internals +have been made available. + +## Usage + +The `events` package centers around a `Sink` type. 
Events are written with +calls to `Sink.Write(event Event)`. Sinks can be wired up in various +configurations to achieve interesting behavior. + +The canonical example is that employed by the +[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) +package. Let's say we have a type `httpSink` where we'd like to queue +notifications. As a rule, it should send a single http request and return an +error if it fails: + +```go +func (h *httpSink) Write(event Event) error { + p, err := json.Marshal(event) + if err != nil { + return err + } + body := bytes.NewReader(p) + resp, err := h.client.Post(h.url, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return errors.New("unexpected status") + } + + return nil +} + +// implement (*httpSink).Close() +``` + +With just that, we can start using components from this package. One can call +`(*httpSink).Write` to send events as the body of a post request to a +configured URL. + +### Retries + +HTTP can be unreliable. The first feature we'd like is to have some retry: + +```go +hs := newHTTPSink(/*...*/) +retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) +``` + +We now have a sink that will retry events against the `httpSink` until they +succeed. The retry will back off for one second after 5 consecutive failures, +using the breaker strategy. + +### Queues + +This isn't quite enough. We want a sink that doesn't block while we are +waiting for events to be sent. Let's add a `Queue`: + +```go +queue := NewQueue(retry) +``` + +Now, we have an unbounded queue that will work through all events sent with +`(*Queue).Write`. Events can be added asynchronously to the queue without +blocking the current execution path. This is ideal for use in an http request. + +### Broadcast + +It usually turns out that you want to send to more than one listener. We can +use `Broadcaster` to support this: + +```go +var broadcast = NewBroadcaster() // make it available somewhere in your application. +broadcast.Add(queue) // add your queue! +broadcast.Add(queue2) // and another! +``` + +With the above, we can now call `broadcast.Write` in our http handlers and have +all the events distributed to each queue. Because the events are queued, no +listener blocks another. + +### Extending + +For the most part, the above is sufficient for a lot of applications. However, +extending the above functionality can be done by implementing your own `Sink`. The +behavior and semantics of the sink can be completely dependent on the +application requirements. The interface is provided below for reference: + +```go +type Sink interface { + Write(Event) error + Close() error +} +``` + +Application behavior can be controlled by how `Write` behaves. The examples +above are designed to queue the message and return as quickly as possible. +Other implementations may block until the event is committed to durable +storage. + +## Copyright and license + +Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, +Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go new file mode 100644 index 00000000000..5120078dfb8 --- /dev/null +++ b/vendor/github.com/docker/go-events/broadcast.go @@ -0,0 +1,178 @@ +package events + +import ( + "fmt" + "sync" + + "github.com/sirupsen/logrus" +) + +// Broadcaster sends events to multiple, reliable Sinks.
The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan Event + adds chan configureRequest + removes chan configureRequest + + shutdown chan struct{} + closed chan struct{} + once sync.Once +} + +// NewBroadcaster creates a Broadcaster over the given sinks. The +// broadcaster behavior will be affected by the properties of the sink. +// Generally, the sink should accept all messages and deal with reliability on +// its own. EventQueue and RetryingSink are useful here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan Event), + adds: make(chan configureRequest), + removes: make(chan configureRequest), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts an event to be dispatched to all sinks. This method will never +// fail and should never block (hopefully!). The caller cedes the memory to the +// broadcaster and should not modify it after calling write. +func (b *Broadcaster) Write(event Event) error { + select { + case b.events <- event: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Add the sink to the broadcaster. +// +// The provided sink must be comparable with equality. Typically, this just +// works with a regular pointer type. +func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink. +func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, iterating the slice is + // faster for the common send path.
+ + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 00000000000..802cf51ffec --- /dev/null +++ b/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 00000000000..56db7c25103 --- /dev/null +++ b/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go new file mode 100644 index 00000000000..f0f1d9ea5ff --- /dev/null +++ b/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. 
If an error is + received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 00000000000..e6c0eb69dd5 --- /dev/null +++ b/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that will send events to dst when the +// Matcher returns true. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. +func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close. + if f.closed { + return nil + } + + f.closed = true + return f.dst.Close() +} diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go new file mode 100644 index 00000000000..4bb770afc2e --- /dev/null +++ b/vendor/github.com/docker/go-events/queue.go @@ -0,0 +1,111 @@ +package events + +import ( + "container/list" + "sync" + + "github.com/sirupsen/logrus" +) + +// Queue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type Queue struct { + dst Sink + events *list.List + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// NewQueue returns a queue to the provided Sink dst. +func NewQueue(dst Sink) *Queue { + eq := Queue{ + dst: dst, + events: list.New(), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *Queue) Write(event Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shuts down the event queue, flushing any queued events to the +// destination sink before closing it. +func (eq *Queue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *Queue) run() { + for { + event := eq.next() + + if event == nil { + return // a nil event means the queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate.
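For review context: a minimal, hypothetical use of the `Filter` and `MatcherFunc` types above, with a `Channel` standing in for the downstream sink (the "audit:" prefix is invented for illustration):

```go
package main

import (
	"fmt"
	"strings"

	events "github.com/docker/go-events"
)

func main() {
	dst := events.NewChannel(1) // buffered channel sink

	// Forward only string events carrying an "audit:" prefix.
	sink := events.NewFilter(dst, events.MatcherFunc(func(e events.Event) bool {
		s, ok := e.(string)
		return ok && strings.HasPrefix(s, "audit:")
	}))

	_ = sink.Write("audit: login") // matches; lands on dst.C
	_ = sink.Write("debug: noise") // silently dropped by the filter
	fmt.Println(<-dst.C)           // audit: login
	_ = sink.Close()
}
```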
+			// Eventually, go-events should not use logrus at all,
+			// and should bubble up conditions like this through
+			// error values.
+			logrus.WithFields(logrus.Fields{
+				"event": event,
+				"sink":  eq.dst,
+			}).WithError(err).Debug("eventqueue: dropped event")
+		}
+	}
+}
+
+// next encompasses the critical section of the run loop. When the queue is
+// empty, it will block on the condition. If new data arrives, it will wake
+// and return a block. When closed, nil will be returned.
+func (eq *Queue) next() Event {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	for eq.events.Len() < 1 {
+		if eq.closed {
+			eq.cond.Broadcast()
+			return nil
+		}
+
+		eq.cond.Wait()
+	}
+
+	front := eq.events.Front()
+	block := front.Value.(Event)
+	eq.events.Remove(front)
+
+	return block
+}
diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go
new file mode 100644
index 00000000000..b7f0a542252
--- /dev/null
+++ b/vendor/github.com/docker/go-events/retry.go
@@ -0,0 +1,260 @@
+package events
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// RetryingSink retries the write until success or an ErrSinkClosed is
+// returned. The underlying sink must succeed with probability p > 0 or the
+// sink will block. Retry is configured with a RetryStrategy. Concurrent calls
+// to a retrying sink are serialized through the sink, meaning that if one is
+// in-flight, another will not proceed.
+type RetryingSink struct {
+	sink     Sink
+	strategy RetryStrategy
+	closed   chan struct{}
+	once     sync.Once
+}
+
+// NewRetryingSink returns a sink that will retry writes to a sink, backing
+// off on failure. The provided RetryStrategy decides how long to back off
+// and whether a failing event is eventually dropped.
+func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
+	rs := &RetryingSink{
+		sink:     sink,
+		strategy: strategy,
+		closed:   make(chan struct{}),
+	}
+
+	return rs
+}
+
+// Write attempts to flush the events to the downstream sink until it succeeds
+// or the sink is closed.
+func (rs *RetryingSink) Write(event Event) error {
+	logger := logrus.WithField("event", event)
+
+retry:
+	select {
+	case <-rs.closed:
+		return ErrSinkClosed
+	default:
+	}
+
+	if backoff := rs.strategy.Proceed(event); backoff > 0 {
+		select {
+		case <-time.After(backoff):
+			// TODO(stevvooe): This branch holds up the next try. Before, we
+			// would simply break to the "retry" label and then possibly wait
+			// again. However, this requires all retry strategies to have a
+			// large probability of probing the sink for success, rather than
+			// just backing off and sending the request.
+		case <-rs.closed:
+			return ErrSinkClosed
+		}
+	}
+
+	if err := rs.sink.Write(event); err != nil {
+		if err == ErrSinkClosed {
+			// terminal!
+			return err
+		}
+
+		logger := logger.WithError(err) // shadow!!
+
+		if rs.strategy.Failure(event, err) {
+			logger.Errorf("retryingsink: dropped event")
+			return nil
+		}
+
+		logger.Errorf("retryingsink: error writing event, retrying")
+		goto retry
+	}
+
+	rs.strategy.Success(event)
+	return nil
+}
+
+// Close closes the retrying sink; the underlying sink is not closed.
+func (rs *RetryingSink) Close() error {
+	rs.once.Do(func() {
+		close(rs.closed)
+	})
+
+	return nil
+}
+
+func (rs *RetryingSink) String() string {
+	// Serialize a copy of the RetryingSink without the sync.Once, to avoid
+	// a data race.
+	rs2 := map[string]interface{}{
+		"sink":     rs.sink,
+		"strategy": rs.strategy,
+		"closed":   rs.closed,
+	}
+	return fmt.Sprint(rs2)
+}
+
+// RetryStrategy defines a strategy for retrying event sink writes.
+//
+// All methods should be goroutine safe.
+type RetryStrategy interface {
+	// Proceed is called before every event send. If Proceed returns a
+	// positive, non-zero duration, the retryer will back off for that
+	// duration.
+	//
+	// An event is provided, but may be ignored.
+	Proceed(event Event) time.Duration
+
+	// Failure reports a failure to the strategy. If this method returns true,
+	// the event should be dropped.
+	Failure(event Event, err error) bool
+
+	// Success should be called when an event is sent successfully.
+	Success(event Event)
+}
+
+// Breaker implements a circuit breaker retry strategy.
+//
+// The current implementation never drops events.
+type Breaker struct {
+	threshold int
+	recent    int
+	last      time.Time
+	backoff   time.Duration // time after which we retry after failure.
+	mu        sync.Mutex
+}
+
+var _ RetryStrategy = &Breaker{}
+
+// NewBreaker returns a breaker that will back off after the threshold has
+// been tripped. A Breaker is thread safe and may be shared by many goroutines.
+func NewBreaker(threshold int, backoff time.Duration) *Breaker {
+	return &Breaker{
+		threshold: threshold,
+		backoff:   backoff,
+	}
+}
+
+// Proceed checks the failures against the threshold.
+func (b *Breaker) Proceed(event Event) time.Duration {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if b.recent < b.threshold {
+		return 0
+	}
+
+	return b.last.Add(b.backoff).Sub(time.Now())
+}
+
+// Success resets the breaker.
+func (b *Breaker) Success(event Event) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.recent = 0
+	b.last = time.Time{}
+}
+
+// Failure records the failure and latest failure time.
+func (b *Breaker) Failure(event Event, err error) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.recent++
+	b.last = time.Now().UTC()
+	return false // never drop events.
+}
+
+var (
+	// DefaultExponentialBackoffConfig provides a default configuration for
+	// exponential backoff.
+	DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
+		Base:   time.Second,
+		Factor: time.Second,
+		Max:    20 * time.Second,
+	}
+)
+
+// ExponentialBackoffConfig configures backoff parameters.
+//
+// Note that these parameters operate on the upper bound for choosing a random
+// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
+// the backoff value.
+type ExponentialBackoffConfig struct {
+	// Base is the minimum bound for backing off after failure.
+	Base time.Duration
+
+	// Factor sets the amount of time by which the backoff grows with each
+	// failure.
+	Factor time.Duration
+
+	// Max is the absolute maximum bound for a single backoff.
+	Max time.Duration
+}
+
+// ExponentialBackoff implements random backoff with exponentially increasing
+// bounds as the number of consecutive failures increases.
+type ExponentialBackoff struct {
+	failures uint64 // consecutive failure counter (needs to be 64-bit aligned)
+	config   ExponentialBackoffConfig
+}
+
+// NewExponentialBackoff returns an exponential backoff strategy with the
+// desired config. Zero-valued config fields fall back to the defaults.
+func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
+	return &ExponentialBackoff{
+		config: config,
+	}
+}
+
+// Proceed returns the next randomly bound exponential backoff time.
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration { + return b.backoff(atomic.LoadUint64(&b.failures)) +} + +// Success resets the failures counter. +func (b *ExponentialBackoff) Success(event Event) { + atomic.StoreUint64(&b.failures, 0) +} + +// Failure increments the failure counter. +func (b *ExponentialBackoff) Failure(event Event, err error) bool { + atomic.AddUint64(&b.failures, 1) + return false +} + +// backoff calculates the amount of time to wait based on the number of +// consecutive failures. +func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { + if failures <= 0 { + // proceed normally when there are no failures. + return 0 + } + + factor := b.config.Factor + if factor <= 0 { + factor = DefaultExponentialBackoffConfig.Factor + } + + backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) + + max := b.config.Max + if max <= 0 { + max = DefaultExponentialBackoffConfig.Max + } + + if backoff > max || backoff < 0 { + backoff = max + } + + // Choose a uniformly distributed value from [0, backoff). + return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/docker/go-plugins-helpers/volume/api.go b/vendor/github.com/docker/go-plugins-helpers/volume/api.go index dcc2f3abf80..387e82ac551 100644 --- a/vendor/github.com/docker/go-plugins-helpers/volume/api.go +++ b/vendor/github.com/docker/go-plugins-helpers/volume/api.go @@ -1,7 +1,6 @@ package volume import ( - "log" "net/http" "github.com/docker/go-plugins-helpers/sdk" @@ -130,7 +129,6 @@ func NewHandler(driver Driver) *Handler { func (h *Handler) initMux() { h.HandleFunc(createPath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers createPath") req := &CreateRequest{} err := sdk.DecodeRequest(w, r, req) if err != nil { @@ -144,7 +142,6 @@ func (h *Handler) initMux() { sdk.EncodeResponse(w, struct{}{}, false) }) h.HandleFunc(removePath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers removePath") req := &RemoveRequest{} err := sdk.DecodeRequest(w, r, req) if err != nil { @@ -158,7 +155,6 @@ func (h *Handler) initMux() { sdk.EncodeResponse(w, struct{}{}, false) }) h.HandleFunc(mountPath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers mountPath") req := &MountRequest{} err := sdk.DecodeRequest(w, r, req) if err != nil { @@ -172,7 +168,6 @@ func (h *Handler) initMux() { sdk.EncodeResponse(w, res, false) }) h.HandleFunc(hostVirtualPath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers hostVirtualPath") req := &PathRequest{} err := sdk.DecodeRequest(w, r, req) if err != nil { @@ -186,7 +181,6 @@ func (h *Handler) initMux() { sdk.EncodeResponse(w, res, false) }) h.HandleFunc(getPath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers getPath") req := &GetRequest{} err := sdk.DecodeRequest(w, r, req) if err != nil { @@ -200,7 +194,6 @@ func (h *Handler) initMux() { sdk.EncodeResponse(w, res, false) }) h.HandleFunc(unmountPath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers unmountPath") req := &UnmountRequest{} err := sdk.DecodeRequest(w, r, req) if err != nil { @@ -214,7 +207,6 @@ func (h *Handler) initMux() { sdk.EncodeResponse(w, struct{}{}, false) }) h.HandleFunc(listPath, func(w http.ResponseWriter, r *http.Request) { - log.Println("Entering go-plugins-helpers listPath") res, err := h.driver.List() if err != nil { 
sdk.EncodeResponse(w, NewErrorResponse(err.Error()), true)
@@ -224,7 +216,6 @@ func (h *Handler) initMux() {
 	})
 	h.HandleFunc(capabilitiesPath, func(w http.ResponseWriter, r *http.Request) {
-		log.Println("Entering go-plugins-helpers capabilitiesPath")
 		sdk.EncodeResponse(w, h.driver.Capabilities(), false)
 	})
 }
diff --git a/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/libnetwork/ipamutils/utils.go
new file mode 100644
index 00000000000..3fd37cd8840
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/ipamutils/utils.go
@@ -0,0 +1,135 @@
+// Package ipamutils provides utility functions for ipam management
+package ipamutils
+
+import (
+	"fmt"
+	"net"
+	"sync"
+)
+
+var (
+	// PredefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12
+	// (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks`
+	PredefinedLocalScopeDefaultNetworks []*net.IPNet
+	// PredefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8
+	// (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks`
+	PredefinedGlobalScopeDefaultNetworks []*net.IPNet
+	mutex                                sync.Mutex
+	localScopeDefaultNetworks            = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16},
+		{"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16},
+		{"192.168.0.0/16", 20}}
+	globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}}
+)
+
+// NetworkToSplit represents a network that has to be split in chunks with mask length Size.
+// Each subnet in the set is derived from the Base pool. Base is to be passed
+// in CIDR format.
+// Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+// 10.10.[0-255].0/24 address pools
+type NetworkToSplit struct {
+	Base string `json:"base"`
+	Size int    `json:"size"`
+}
+
+func init() {
+	var err error
+	if PredefinedGlobalScopeDefaultNetworks, err = splitNetworks(globalScopeDefaultNetworks); err != nil {
+		// we are going to panic in case of error as we should never get into this state
+		panic("InitAddressPools failed to initialize the global scope default address pool")
+	}
+
+	if PredefinedLocalScopeDefaultNetworks, err = splitNetworks(localScopeDefaultNetworks); err != nil {
+		// we are going to panic in case of error as we should never get into this state
+		panic("InitAddressPools failed to initialize the local scope default address pool")
+	}
+}
+
+// configDefaultNetworks configures the local as well as the global default pool based on input
+func configDefaultNetworks(defaultAddressPool []*NetworkToSplit, result *[]*net.IPNet) error {
+	mutex.Lock()
+	defer mutex.Unlock()
+	defaultNetworks, err := splitNetworks(defaultAddressPool)
+	if err != nil {
+		return err
+	}
+	*result = defaultNetworks
+	return nil
+}
+
+// GetGlobalScopeDefaultNetworks returns PredefinedGlobalScopeDefaultNetworks
+func GetGlobalScopeDefaultNetworks() []*net.IPNet {
+	mutex.Lock()
+	defer mutex.Unlock()
+	return PredefinedGlobalScopeDefaultNetworks
+}
+
+// GetLocalScopeDefaultNetworks returns PredefinedLocalScopeDefaultNetworks
+func GetLocalScopeDefaultNetworks() []*net.IPNet {
+	mutex.Lock()
+	defer mutex.Unlock()
+	return PredefinedLocalScopeDefaultNetworks
+}
+
+// ConfigGlobalScopeDefaultNetworks configures global default pool.
+// Ideally this will be called from SwarmKit as part of swarm init
+func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
+	if defaultAddressPool == nil {
+		defaultAddressPool = globalScopeDefaultNetworks
+	}
+	return configDefaultNetworks(defaultAddressPool, &PredefinedGlobalScopeDefaultNetworks)
+}
+
+// ConfigLocalScopeDefaultNetworks configures local default pool.
+// Ideally this will be called during libnetwork init
+func ConfigLocalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
+	if defaultAddressPool == nil {
+		return nil
+	}
+	return configDefaultNetworks(defaultAddressPool, &PredefinedLocalScopeDefaultNetworks)
+}
+
+// splitNetworks takes a slice of networks, splits them accordingly and returns them
+func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
+	localPools := make([]*net.IPNet, 0, len(list))
+
+	for _, p := range list {
+		_, b, err := net.ParseCIDR(p.Base)
+		if err != nil {
+			return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
+		}
+		ones, _ := b.Mask.Size()
+		if p.Size <= 0 || p.Size < ones {
+			return nil, fmt.Errorf("invalid pools size: %d", p.Size)
+		}
+		localPools = append(localPools, splitNetwork(p.Size, b)...)
+	}
+	return localPools, nil
+}
+
+func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
+	one, bits := base.Mask.Size()
+	mask := net.CIDRMask(size, bits)
+	n := 1 << uint(size-one)
+	s := uint(bits - size)
+	list := make([]*net.IPNet, 0, n)
+
+	for i := 0; i < n; i++ {
+		ip := copyIP(base.IP)
+		addIntToIP(ip, uint(i<<s))
+		list = append(list, &net.IPNet{IP: ip, Mask: mask})
+	}
+	return list
+}
+
+func copyIP(from net.IP) net.IP {
+	ip := make([]byte, len(from))
+	copy(ip, from)
+	return ip
+}
+
+func addIntToIP(array net.IP, ordinal uint) {
+	for i := len(array) - 1; i >= 0; i-- {
+		array[i] |= (byte)(ordinal & 0xff)
+		ordinal >>= 8
+	}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
index d5b0fe128a7..9921997daff 100644
--- a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
+++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml
@@ -5,4 +5,4 @@ linters:
   disable-all: true
   enable:
     - gofumpt
-    - gci
+    - goimports
diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE
index 707a0ed49b2..20837167a9a 100644
--- a/vendor/github.com/fsouza/go-dockerclient/LICENSE
+++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2013-2021, go-dockerclient authors
+Copyright (c) go-dockerclient authors
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile
index 431458441da..2f5d9fcc6a9 100644
--- a/vendor/github.com/fsouza/go-dockerclient/Makefile
+++ b/vendor/github.com/fsouza/go-dockerclient/Makefile
@@ -7,12 +7,12 @@ test: pretest gotest
 
 .PHONY: golangci-lint
 golangci-lint:
-	cd /tmp && GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
 	golangci-lint run
 
 .PHONY: staticcheck
 staticcheck:
-	cd /tmp && GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@master
+	go install honnef.co/go/tools/cmd/staticcheck@master
 	staticcheck ./...
.PHONY: lint diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go index bc949dc3590..d867c47363f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/auth.go +++ b/vendor/github.com/fsouza/go-dockerclient/auth.go @@ -10,7 +10,6 @@ import ( "encoding/json" "errors" "io" - "io/ioutil" "net/http" "os" "os/exec" @@ -272,7 +271,7 @@ func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) { return authStatus, err } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return authStatus, err } @@ -318,7 +317,7 @@ func NewAuthConfigurationsFromCredsHelpers(registry string) (*AuthConfiguration, func getHelperProviderFromDockerCfg(pathsToTry []string, registry string) (string, error) { for _, path := range pathsToTry { - content, err := ioutil.ReadFile(path) + content, err := os.ReadFile(path) if err != nil { // if we can't read the file keep going continue diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index d0814a5c0bd..1bbf611a23f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/httputil" @@ -240,19 +239,19 @@ func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString stri var keyPEMBlock []byte var caPEMCert []byte if _, err := os.Stat(cert); !os.IsNotExist(err) { - certPEMBlock, err = ioutil.ReadFile(cert) + certPEMBlock, err = os.ReadFile(cert) if err != nil { return nil, err } } if _, err := os.Stat(key); !os.IsNotExist(err) { - keyPEMBlock, err = ioutil.ReadFile(key) + keyPEMBlock, err = os.ReadFile(key) if err != nil { return nil, err } } if _, err := os.Stat(ca); !os.IsNotExist(err) { - caPEMCert, err = ioutil.ReadFile(ca) + caPEMCert, err = os.ReadFile(ca) if err != nil { return nil, err } @@ -565,10 +564,10 @@ func (c *Client) streamURL(method, url string, streamOptions streamOptions) erro protocol := c.endpointURL.Scheme address := c.endpointURL.Path if streamOptions.stdout == nil { - streamOptions.stdout = ioutil.Discard + streamOptions.stdout = io.Discard } if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard + streamOptions.stderr = io.Discard } if protocol == unixProtocol || protocol == namedPipeProtocol { @@ -798,10 +797,10 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close // will "hang" until the container terminates, even though you're not reading // stdout/stderr if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard + hijackOptions.stdout = io.Discard } if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard + hijackOptions.stderr = io.Discard } go func() { @@ -1024,7 +1023,7 @@ func newError(resp *http.Response) *Error { Message string `json:"message"` } defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) if err != nil { return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)} } diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go index cd2034304be..e9f13f3948d 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a 
BSD-style // license that can be found in the LICENSE file. +//go:build !windows // +build !windows package docker diff --git a/vendor/github.com/fsouza/go-dockerclient/plugin.go b/vendor/github.com/fsouza/go-dockerclient/plugin.go index be45607b90f..7951e362752 100644 --- a/vendor/github.com/fsouza/go-dockerclient/plugin.go +++ b/vendor/github.com/fsouza/go-dockerclient/plugin.go @@ -8,7 +8,7 @@ import ( "context" "encoding/json" "errors" - "io/ioutil" + "io" "net/http" ) @@ -53,7 +53,7 @@ func (c *Client) InstallPlugins(opts InstallPluginOptions) error { defer resp.Body.Close() // PullPlugin streams back the progress of the pull, we must consume the whole body // otherwise the pull will be canceled on the engine. - if _, err := ioutil.ReadAll(resp.Body); err != nil { + if _, err := io.ReadAll(resp.Body); err != nil { return err } return nil @@ -297,7 +297,7 @@ func (c *Client) RemovePlugin(opts RemovePluginOptions) (*PluginDetail, error) { return nil, err } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -386,7 +386,7 @@ func (c *Client) CreatePlugin(opts CreatePluginOptions) (string, error) { return "", err } defer resp.Body.Close() - containerNameBytes, err := ioutil.ReadAll(resp.Body) + containerNameBytes, err := io.ReadAll(resp.Body) if err != nil { return "", err } diff --git a/vendor/github.com/fsouza/go-dockerclient/system.go b/vendor/github.com/fsouza/go-dockerclient/system.go index 46b9faf00e2..f1791f6cea5 100644 --- a/vendor/github.com/fsouza/go-dockerclient/system.go +++ b/vendor/github.com/fsouza/go-dockerclient/system.go @@ -9,7 +9,6 @@ import ( // VolumeUsageData represents usage data from the docker system api // More Info Here https://dockr.ly/2PNzQyO type VolumeUsageData struct { - // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. // diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index f27a7bbf21f..cbe72206550 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -7,7 +7,6 @@ package docker import ( "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -112,7 +111,7 @@ func validateContextDirectory(srcPath string, excludes []string) error { func parseDockerignore(root string) ([]string, error) { var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) + ignore, err := os.ReadFile(path.Join(root, ".dockerignore")) if err != nil && !os.IsNotExist(err) { return excludes, fmt.Errorf("error reading .dockerignore: %w", err) } diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/google/shlex/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
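The docker/go-events sinks added earlier in this diff compose through the shared Sink interface: a Queue decouples producers from a slow destination, a RetryingSink retries writes according to a RetryStrategy such as Breaker, and a Channel terminates the chain for a consumer goroutine. A minimal sketch of that composition, using only constructors defined in the vendored sources above (the event payload and breaker timings are illustrative):

```go
package main

import (
	"fmt"
	"time"

	events "github.com/docker/go-events"
)

func main() {
	// Channel is the terminal sink; a consumer drains C until Done() closes.
	ch := events.NewChannel(0)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case ev := <-ch.C:
				fmt.Println("event:", ev) // Event is interface{}, so any value works
			case <-ch.Done():
				return
			}
		}
	}()

	// Retry writes into the channel with a circuit breaker (trip after 3
	// failures, back off 100ms), and buffer producers behind an unbounded Queue.
	retrying := events.NewRetryingSink(ch, events.NewBreaker(3, 100*time.Millisecond))
	q := events.NewQueue(retrying)

	_ = q.Write("image-pull-complete") // illustrative payload
	_ = q.Close()                      // flushes queued events, then closes the retrying sink
	_ = ch.Close()
	<-done
}
```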
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 00000000000..c86bcc066fd
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 00000000000..d98308bce38
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+    shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+    l := NewLexer(os.Stdin)
+    for token, err := l.Next(); err == nil; token, err = l.Next() {
+    	// process token
+    }
+
+To access the raw token stream (which includes tokens for comments):
+
+    t := NewTokenizer(os.Stdin)
+    for token, err := t.Next(); err == nil; token, err = t.Next() {
+    	// process token
+    }
+
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexical token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifies a rune
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// It returns an error if the state machine reaches a state it does not know how to handle.
+func (t *Tokenizer) scanStream() (*Token, error) { + state := startState + var tokenType TokenType + var value []rune + var nextRune rune + var nextRuneType runeTokenClass + var err error + + for { + nextRune, _, err = t.input.ReadRune() + nextRuneType = t.classifier.ClassifyRune(nextRune) + + if err == io.EOF { + nextRuneType = eofRuneClass + err = nil + } else if err != nil { + return nil, err + } + + switch state { + case startState: // no runes read yet + { + switch nextRuneType { + case eofRuneClass: + { + return nil, io.EOF + } + case spaceRuneClass: + { + } + case escapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingState + } + case escapeRuneClass: + { + tokenType = WordToken + state = escapingState + } + case commentRuneClass: + { + tokenType = CommentToken + state = commentState + } + default: + { + tokenType = WordToken + value = append(value, nextRune) + state = inWordState + } + } + } + case inWordState: // in a regular word + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + state = quotingState + } + case escapeRuneClass: + { + state = escapingState + } + default: + { + value = append(value, nextRune) + } + } + } + case escapingState: // the rune after an escape character + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = inWordState + value = append(value, nextRune) + } + } + } + case escapingQuotedState: // the next rune after an escape character, in double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = quotingEscapingState + value = append(value, nextRune) + } + } + } + case quotingEscapingState: // in escaping double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = inWordState + } + case escapeRuneClass: + { + state = escapingQuotedState + } + default: + { + value = append(value, nextRune) + } + } + } + case quotingState: // in non-escaping single quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case nonEscapingQuoteRuneClass: + { + state = inWordState + } + default: + { + value = append(value, nextRune) + } + } + } + case commentState: // in a comment + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + if nextRune == '\n' { + state = startState + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } else { + value = append(value, nextRune) + } + } + default: + { + value = 
append(value, nextRune) + } + } + } + default: + { + return nil, fmt.Errorf("Unexpected state: %v", state) + } + } + } +} + +// Next returns the next token in the stream. +func (t *Tokenizer) Next() (*Token, error) { + return t.scanStream() +} + +// Split partitions a string into a slice of strings. +func Split(s string) ([]string, error) { + l := NewLexer(strings.NewReader(s)) + subStrings := make([]string, 0) + for { + word, err := l.Next() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } +} diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore deleted file mode 100644 index 6d9953c3c6a..00000000000 --- a/vendor/github.com/hpcloud/tail/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.test -.go - diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml deleted file mode 100644 index 9cf8bb7fc5f..00000000000 --- a/vendor/github.com/hpcloud/tail/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -script: - - go test -race -v ./... - -go: - - 1.4 - - 1.5 - - 1.6 - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get gopkg.in/fsnotify.v1 - - go get gopkg.in/tomb.v1 diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md deleted file mode 100644 index 422790c073d..00000000000 --- a/vendor/github.com/hpcloud/tail/CHANGES.md +++ /dev/null @@ -1,63 +0,0 @@ -# API v1 (gopkg.in/hpcloud/tail.v1) - -## April, 2016 - -* Migrated to godep, as depman is not longer supported -* Introduced golang vendoring feature -* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file - -## July, 2015 - -* Fix inotify watcher leak; remove `Cleanup` (#51) - -# API v0 (gopkg.in/hpcloud/tail.v0) - -## June, 2015 - -* Don't return partial lines (PR #40) -* Use stable version of fsnotify (#46) - -## July, 2014 - -* Fix tail for Windows (PR #36) - -## May, 2014 - -* Improved rate limiting using leaky bucket (PR #29) -* Fix odd line splitting (PR #30) - -## Apr, 2014 - -* LimitRate now discards read buffer (PR #28) -* allow reading of longer lines if MaxLineSize is unset (PR #24) -* updated deps.json to latest fsnotify (441bbc86b1) - -## Feb, 2014 - -* added `Config.Logger` to suppress library logging - -## Nov, 2013 - -* add Cleanup to remove leaky inotify watches (PR #20) - -## Aug, 2013 - -* redesigned Location field (PR #12) -* add tail.Tell (PR #14) - -## July, 2013 - -* Rate limiting (PR #10) - -## May, 2013 - -* Detect file deletions/renames in polling file watcher (PR #1) -* Detect file truncation -* Fix potential race condition when reopening the file (issue 5) -* Fix potential blocking of `tail.Stop` (issue 4) -* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop -* Support Follow=false - -## Feb, 2013 - -* Initial open source release diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile deleted file mode 100644 index cd297b940a9..00000000000 --- a/vendor/github.com/hpcloud/tail/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM golang - -RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/ -ADD . $GOPATH/src/github.com/hpcloud/tail/ - -# expecting to fetch dependencies successfully. -RUN go get -v github.com/hpcloud/tail - -# expecting to run the test successfully. 
-RUN go test -v github.com/hpcloud/tail - -# expecting to install successfully -RUN go install -v github.com/hpcloud/tail -RUN go install -v github.com/hpcloud/tail/cmd/gotail - -RUN $GOPATH/bin/gotail -h || true - -ENV PATH $GOPATH/bin:$PATH -CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile deleted file mode 100644 index 6591b24fc11..00000000000 --- a/vendor/github.com/hpcloud/tail/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -default: test - -test: *.go - go test -v -race ./... - -fmt: - gofmt -w . - -# Run the test in an isolated environment. -fulltest: - docker build -t hpcloud/tail . diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md deleted file mode 100644 index fb7fbc26c67..00000000000 --- a/vendor/github.com/hpcloud/tail/README.md +++ /dev/null @@ -1,28 +0,0 @@ -[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail) -[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) - -# Go package for tail-ing files - -A Go package striving to emulate the features of the BSD `tail` program. - -```Go -t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) -for line := range t.Lines { - fmt.Println(line.Text) -} -``` - -See [API documentation](http://godoc.org/github.com/hpcloud/tail). - -## Log rotation - -Tail comes with full support for truncation/move detection as it is -designed to work with log rotation tools. - -## Installing - - go get github.com/hpcloud/tail/... - -## Windows support - -This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml deleted file mode 100644 index d370055b6f0..00000000000 --- a/vendor/github.com/hpcloud/tail/appveyor.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 0.{build} -skip_tags: true -cache: C:\Users\appveyor\AppData\Local\NuGet\Cache -build_script: -- SET GOPATH=c:\workspace -- go test -v -race ./... 
-test: off
-clone_folder: c:\workspace\src\github.com\hpcloud\tail
-branches:
-  only:
-    - master
diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go
deleted file mode 100644
index bc4dc3357ab..00000000000
--- a/vendor/github.com/hpcloud/tail/tail_posix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd
-
-package tail
-
-import (
-	"os"
-)
-
-func OpenFile(name string) (file *os.File, err error) {
-	return os.Open(name)
-}
diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go
deleted file mode 100644
index ef2cfca1b74..00000000000
--- a/vendor/github.com/hpcloud/tail/tail_windows.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build windows
-
-package tail
-
-import (
-	"github.com/hpcloud/tail/winfile"
-	"os"
-)
-
-func OpenFile(name string) (file *os.File, err error) {
-	return winfile.OpenFile(name, os.O_RDONLY, 0)
-}
diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go
index 412ff549739..6dc9600c860 100644
--- a/vendor/github.com/jinzhu/copier/copier.go
+++ b/vendor/github.com/jinzhu/copier/copier.go
@@ -24,6 +24,13 @@ const (
 
 	// Denotes that the value as been copied
 	hasCopied
+
+	// Some default converter types for a nicer syntax
+	String  string  = ""
+	Bool    bool    = false
+	Int     int     = 0
+	Float32 float32 = 0
+	Float64 float64 = 0
 )
 
 // Option sets copy options
@@ -32,6 +39,18 @@ type Option struct {
 	// struct having all it's fields set to their zero values respectively (see IsZero() in reflect/value.go)
 	IgnoreEmpty bool
 	DeepCopy    bool
+	Converters  []TypeConverter
+}
+
+type TypeConverter struct {
+	SrcType interface{}
+	DstType interface{}
+	Fn      func(src interface{}) (interface{}, error)
+}
+
+type converterPair struct {
+	SrcType reflect.Type
+	DstType reflect.Type
 }
 
 // Tag Flags
@@ -59,12 +78,27 @@ func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err
 
 func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) {
 	var (
-		isSlice bool
-		amount  = 1
-		from    = indirect(reflect.ValueOf(fromValue))
-		to      = indirect(reflect.ValueOf(toValue))
+		isSlice    bool
+		amount     = 1
+		from       = indirect(reflect.ValueOf(fromValue))
+		to         = indirect(reflect.ValueOf(toValue))
+		converters map[converterPair]TypeConverter
 	)
 
+	// save converters into a map for faster lookup
+	for i := range opt.Converters {
+		if converters == nil {
+			converters = make(map[converterPair]TypeConverter)
+		}
+
+		pair := converterPair{
+			SrcType: reflect.TypeOf(opt.Converters[i].SrcType),
+			DstType: reflect.TypeOf(opt.Converters[i].DstType),
+		}
+
+		converters[pair] = opt.Converters[i]
+	}
+
 	if !to.CanAddr() {
 		return ErrInvalidCopyDestination
 	}
@@ -113,13 +147,16 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
 
 		for _, k := range from.MapKeys() {
 			toKey := indirect(reflect.New(toType.Key()))
-			if !set(toKey, k, opt.DeepCopy) {
+			if !set(toKey, k, opt.DeepCopy, converters) {
 				return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key())
 			}
 
-			elemType, _ := indirectType(toType.Elem())
+			elemType := toType.Elem()
+			if elemType.Kind() != reflect.Slice {
+				elemType, _ = indirectType(elemType)
+			}
 			toValue := indirect(reflect.New(elemType))
-			if !set(toValue, from.MapIndex(k), opt.DeepCopy) {
+			if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) {
 				if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil {
 					return err
 				}
@@ -148,7
+185,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem())) } - if !set(to.Index(i), from.Index(i), opt.DeepCopy) { + if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt) if err != nil { @@ -203,6 +240,8 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) // check source if source.IsValid() { + copyUnexportedStructFields(dest, source) + // Copy from source field to dest field or method fromTypeFields := deepFields(fromType) for _, field := range fromTypeFields { @@ -249,7 +288,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) toField := dest.FieldByName(destFieldName) if toField.IsValid() { if toField.CanSet() { - if !set(toField, fromField, opt.DeepCopy) { + if !set(toField, fromField, opt.DeepCopy, converters) { if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil { return err } @@ -291,7 +330,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() { values := fromMethod.Call([]reflect.Value{}) if len(values) >= 1 { - set(toField, values[0], opt.DeepCopy) + set(toField, values[0], opt.DeepCopy, converters) } } } @@ -303,7 +342,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest.Addr())) } else { - if !set(to.Index(i), dest.Addr(), opt.DeepCopy) { + if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt) if err != nil { @@ -315,7 +354,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest)) } else { - if !set(to.Index(i), dest, opt.DeepCopy) { + if !set(to.Index(i), dest, opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt) if err != nil { @@ -334,6 +373,24 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) return } +func copyUnexportedStructFields(to, from reflect.Value) { + if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) { + return + } + + // create a shallow copy of 'to' to get all fields + tmp := indirect(reflect.New(to.Type())) + tmp.Set(from) + + // revert exported fields + for i := 0; i < to.NumField(); i++ { + if tmp.Field(i).CanSet() { + tmp.Field(i).Set(to.Field(i)) + } + } + to.Set(tmp) +} + func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool { if !ignoreEmpty { return false @@ -348,10 +405,15 @@ func deepFields(reflectType reflect.Type) []reflect.StructField { for i := 0; i < reflectType.NumField(); i++ { v := reflectType.Field(i) - if v.Anonymous { - fields = append(fields, deepFields(v.Type)...) - } else { + // PkgPath is the package path that qualifies a lower case (unexported) + // field name. It is empty for upper case (exported) field names. + // See https://golang.org/ref/spec#Uniqueness_of_identifiers + if v.PkgPath == "" { fields = append(fields, v) + if v.Anonymous { + // also consider fields of anonymous fields as fields of the root + fields = append(fields, deepFields(v.Type)...) 
+ } } } @@ -376,8 +438,14 @@ func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) { return reflectType, isPtr } -func set(to, from reflect.Value, deepCopy bool) bool { +func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool { if from.IsValid() { + if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil { + return false + } else if ok { + return true + } + if to.Kind() == reflect.Ptr { // set `to` to nil if from is nil if from.Kind() == reflect.Ptr && from.IsNil() { @@ -411,6 +479,9 @@ func set(to, from reflect.Value, deepCopy bool) bool { toKind = reflect.TypeOf(to.Interface()).Kind() } } + if from.Kind() == reflect.Ptr && from.IsNil() { + return true + } if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice { return false } @@ -452,7 +523,7 @@ func set(to, from reflect.Value, deepCopy bool) bool { to.Set(rv) } } else if from.Kind() == reflect.Ptr { - return set(to, from.Elem(), deepCopy) + return set(to, from.Elem(), deepCopy, converters) } else { return false } @@ -461,6 +532,33 @@ func set(to, from reflect.Value, deepCopy bool) bool { return true } +// lookupAndCopyWithConverter looks up the type pair, on success the TypeConverter Fn func is called to copy src to dst field. +func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) { + pair := converterPair{ + SrcType: from.Type(), + DstType: to.Type(), + } + + if cnv, ok := converters[pair]; ok { + result, err := cnv.Fn(from.Interface()) + + if err != nil { + return false, err + } + + if result != nil { + to.Set(reflect.ValueOf(result)) + } else { + // in case we've got a nil value to copy + to.Set(reflect.Zero(to.Type())) + } + + return true, nil + } + + return false, nil +} + // parseTags Parses struct tags and returns uint8 bit flags. func parseTags(tag string) (flg uint8, name string, err error) { for _, t := range strings.Split(tag, ",") { diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f8449bf2..d31b3781527 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index e8ff994f8bc..c3ec9d8a78c 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,59 @@ This package provides various compression algorithms. # changelog +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. 
[#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) +
+ See Details +Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to test when upgrading. +
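For context on the v1.15.0 note above: the "synchronous" mode is opted into through the concurrency options. A minimal sketch of using it (not part of this diff; `WithDecoderConcurrency` is the existing upstream option, everything else is illustrative):

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// Decompress stdin to stdout without spawning decoder goroutines.
// With concurrency set to 1, the v1.15.0+ decoder runs synchronously,
// so pooled decoders leave no goroutines behind once a stream finishes.
func main() {
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}
```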
+ +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + * Jan 11, 2022 (v1.14.1) * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) @@ -53,6 +106,9 @@ This package provides various compression algorithms. * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ See changes to v1.12.x + * May 25, 2021 (v1.12.3) * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) @@ -74,9 +130,10 @@ This package provides various compression algorithms. * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
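The `WithLowerEncoderMem` option cited in the v1.12.x notes above trades some speed and compression ratio for a smaller encoder footprint. A minimal sketch of a caller opting in (not part of this diff; `WithLowerEncoderMem` is the existing upstream option, the rest is illustrative):

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// Compress stdin to stdout with reduced encoder memory usage.
func main() {
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithLowerEncoderMem(true))
	if err != nil {
		panic(err)
	}

	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		panic(err)
	}
	// Close flushes the final frame; its error must be checked.
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
```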
- See changes prior to v1.12.1 + See changes to v1.11.x * Mar 26, 2021 (v1.11.13) * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) @@ -135,7 +192,7 @@ This package provides various compression algorithms.
- See changes prior to v1.11.0 + See changes to v1.10.x * July 8, 2020 (v1.10.11) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) @@ -297,11 +354,6 @@ This package provides various compression algorithms. # deflate usage -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | old import | new import | Documentation @@ -323,6 +375,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below. +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0b2e54972cd..d55ea2a7759 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDecode { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index fd49efd75b1..25f6d1108fc 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "fmt" "io" + "math" ) const ( @@ -24,6 +25,10 @@ const ( codegenCodeCount = 19 badCode = 255 + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + // bufferFlushSize indicates the buffer size // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since @@ -36,8 +41,11 @@ const ( bufferSize = bufferFlushSize + 8 ) +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + // The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]int8{ +var lengthExtraBits = [32]uint8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, @@ -51,6 +59,9 @@ var lengthBase = [32]uint8{ 64, 80, 96, 112, 128, 160, 192, 224, 255, } +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + // offset code word extra bits. var offsetExtraBits = [32]int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, @@ -78,10 +89,10 @@ func init() { for i := range offsetCombined[:] { // Don't use extended window values... 
- if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -97,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -215,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { @@ -571,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -672,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size = reuseSize } - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } // Check if we get a reasonable size decrease. if storable && ssize <= size { @@ -717,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) // Store predefined, if we don't get a reasonable improvement. - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. 
+ if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) return } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return } if storable && ssize <= size { @@ -833,9 +851,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] + c := lits[t] bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { @@ -858,12 +876,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] + c := lengths[lengthCode] bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { @@ -883,10 +901,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) + extraLength := int32(length - lengthBase[lengthCode]) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { @@ -907,10 +925,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] @@ -932,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63) - nbits += uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1002,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 histogram(input, w.literalFreq[:numLiterals], fill) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... 
+ w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } w.literalFreq[endBlockMarker] = 1 w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) if fill { @@ -1019,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. - ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -1031,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -1065,6 +1108,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } // Unroll, write 3 codes/loop. // Fastest number of unrolls. for len(input) > 3 { @@ -1074,13 +1120,16 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 - nbytes += uint8(n) + nbytes += n } if nbytes >= bufferFlushSize { if w.err != nil { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } @@ -1096,13 +1145,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << (nbits & 63) - nbits += c.len - if debugDeflate { - count += int(c.len) - } if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1114,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + if debugDeflate { + count += int(c.len) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index f35e00261d3..9ab497c275b 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -17,7 +17,8 @@ const ( // hcode is a huffman code with a bit code and bit length. 
type hcode struct { - code, len uint16 + code uint16 + len uint8 } type huffmanEncoder struct { @@ -56,7 +57,7 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { +func (h *hcode) set(code uint16, length uint8) { h.len = length h.code = code } @@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 10111111 @@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = hcode{code: reverseBits(bits, size), len: size} } return h } @@ -187,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -205,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. @@ -238,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. - copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -296,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)} code++ } list = list[0 : len(list)-int(bits)] @@ -309,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. 
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -317,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i].len = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index d5f62f6a2ca..414c0bea9fa 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -36,6 +36,13 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder @@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var n uint // number of bits extra - var length int - var err error - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 - case v < maxNumLit: - length = 258 - n = 0 - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) - } - f.err = err - return - } - dist = uint32(sym) - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<nb:", err) - } - f.err = err - return - } - } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3.
- { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index cc6db27925c..61342b6b88f 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() { ) fr := f.r.(*bytes.Buffer) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -39,41 +44,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -83,15 +82,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -101,9 +102,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -111,25 +113,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 -
f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -137,12 +141,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -152,38 +156,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -197,9 +198,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -223,9 +227,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -238,20 +243,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3.
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() { ) fr := f.r.(*bytes.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -283,41 +295,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -327,15 +333,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -345,9 +353,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -355,25 +364,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -381,12 +392,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -396,38 +407,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -441,9 +449,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -467,9 +478,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -482,20 +494,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3.
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() { ) fr := f.r.(*bufio.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -527,41 +546,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -571,15 +584,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBufioReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -589,9 +604,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -599,25 +615,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -625,12 +643,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -640,38 +658,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -685,9 +700,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -711,9 +729,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -726,20 +745,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3.
{ - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() { ) fr := f.r.(*strings.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + switch f.stepState { case stateInit: goto readLiteral @@ -771,41 +797,286 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hd.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<nb:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -815,15 +1086,17 @@ readLiteral: var length int switch { case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -833,9 +1106,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -843,25 +1117,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -869,12 +1145,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -884,38 +1160,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -929,9 +1202,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<nb:", err) } f.err = err return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -955,9 +1231,10 @@ readLiteral: } // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) } f.err = CorruptInputError(f.roffset) return @@ -970,20 +1247,22 @@ readLiteral: copyHistory: // Perform a backwards copy according to RFC section 3.2.3. { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + cnt = dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } func (f *decompressor) huffmanBlockDecoder() func() { @@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() { return f.huffmanBufioReader case *strings.Reader: return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader default: - return f.huffmanBlockGeneric + return f.huffmanGenericReader } } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 1e5eea3968a..0f14f8d63b4 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,6 +1,10 @@ package flate -import "fmt" +import ( + "encoding/binary" + "fmt" + "math/bits" +) // fastGen maintains the table for matches, // and the previous byte block for level 2.
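The inflate hunks above all apply one optimization: f.b and f.nb are hoisted into the locals fb and fnb on entry, every early return reassigns them, and the hot loops touch only the locals so the compiler can keep them in registers. A minimal sketch of that pattern before the encoder changes below, using hypothetical bitState/need names rather than the vendored identifiers:

    package sketch

    // bitState stands in for the decompressor's bit buffer fields f.b/f.nb.
    type bitState struct {
        b  uint32 // bit buffer, lowest bits are oldest
        nb uint   // number of valid bits in b
    }

    // need tops the buffer up to at least n bits, pulling bytes from next.
    // The fields are copied into locals once and written back on every
    // exit path, mirroring the fb/fnb bookkeeping in the diff above.
    func (s *bitState) need(n uint, next func() (byte, bool)) bool {
        fb, fnb := s.b, s.nb // hoist fields into registers
        for fnb < n {
            c, ok := next()
            if !ok {
                s.b, s.nb = fb, fnb // reassign before returning
                return false
            }
            fb |= uint32(c) << (fnb & 31)
            fnb += 8
        }
        s.b, s.nb = fb, fnb
        return true
    }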
@@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { @@ -125,11 +154,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... + xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) + dst.n++ + } + } s += l nextEmit = s if nextS >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c4389ab3..8603fbd55ad 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -134,7 +134,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b4244a5c..039639f8989 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,7 +5,7 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates.
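Before the level3 hunks continue, a note on the inlined match-length loop in the level1 hunk above: it compares eight bytes at a time by XORing two little-endian loads; a nonzero XOR pinpoints the first mismatching byte as the trailing-zero count divided by eight, since the little-endian load puts the earliest byte in the lowest bits. A standalone sketch of the same trick (not the vendored matchlenLong):

    package main

    import (
        "encoding/binary"
        "fmt"
        "math/bits"
    )

    // matchLen returns the length of the common prefix of a and b,
    // comparing 8 bytes per iteration where possible.
    func matchLen(a, b []byte) int {
        if len(b) < len(a) {
            a, b = b, a // make a the shorter slice
        }
        var l int
        for len(a)-l >= 8 {
            if diff := binary.LittleEndian.Uint64(a[l:]) ^ binary.LittleEndian.Uint64(b[l:]); diff != 0 {
                // First mismatching byte = trailing zero bits / 8.
                return l + bits.TrailingZeros64(diff)>>3
            }
            l += 8
        }
        for l < len(a) && a[l] == b[l] {
            l++
        }
        return l
    }

    func main() {
        fmt.Println(matchLen([]byte("compression"), []byte("compressor"))) // 8
    }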
@@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( inputMargin = 8 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits ) if debugDeflate && e.cur < 0 { @@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -141,7 +143,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -156,7 +166,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Index first pair after match end. if int(t+4) < len(src) && t > 0 { cv := load3232(src, t) - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,30 +175,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 5 { + nextHash := hash4u(load3232(src, i), tableBits) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hash4u(uint32(x), tableBits) candidates := e.table[currHash] cv = uint32(x) e.table[currHash] = tableEntryPrev{ @@ -200,15 +211,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if cv == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } candidate = candidates.Prev if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + // Match at prev... 
+ continue } } cv = uint32(x >> 8) diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c02b1e..1cbffa1aefe 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -135,7 +135,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a320b7..4b97576bd38 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -210,7 +210,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977ec49..62888edf3cd 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -243,7 +243,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e89912463..544162a4318 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index 3a9618ee193..d818790c132 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,11 +13,10 @@ import ( ) const ( - // From top - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - // 8 bits: xlength = length - MIN_MATCH_LENGTH - // 5 bits offsetcode - // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits lengthShift = 22 offsetMask = 1<<lengthShift - 1 typeMask = 3 << 30 @@ ... @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { xl := xlength if xl > 258 { // We need to have at least baseMatchLength left over for next loop.
- xl = 258 - baseMatchLength + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } } xlength -= xl xl -= baseMatchLength t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc]++ + t.offHist[oc&31]++ t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) t.n++ } @@ ... @@ func (t token) length() uint8 { return uint8(t >> lengthShift) } -// The code is never more than 8 bits, but is returned as uint32 for convenience. -func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) } +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8868a..451160edda3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream.
-func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { return uint16(b.value >> ((64 - n) & 63)) } +// peekTopBits(n) is equvialent to peekBitFast(64 - n) +func (b *bitReaderShifted) peekTopBits(n uint8) uint16 { + return uint16(b.value >> n) +} + func (b *bitReaderShifted) advance(n uint8) { b.bitsRead += n b.value <<= n & 63 @@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc05389..bc95ac623bd 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. 
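The math.MaxUint16 guards added to compress4X and compress4Xp above follow from the 4X block framing: a block carries four independently compressed streams behind a six-byte jump table of three little-endian uint16 lengths, with the fourth stream running to the end of the block, so any of the first three streams longer than 65535 bytes is unrepresentable and the encoder must return ErrIncompressible. A sketch of how a decoder walks that framing, mirroring the jump-table loop in the Decompress4X changes below (splitStreams is a hypothetical helper, not the vendored API):

    package main

    import (
        "encoding/binary"
        "errors"
        "fmt"
    )

    // splitStreams slices a 4X block into its four compressed streams.
    func splitStreams(src []byte) (streams [4][]byte, err error) {
        if len(src) < 6+(4*1) {
            return streams, errors.New("input too small")
        }
        start := 6 // jump table: 3 little-endian uint16 lengths
        for i := 0; i < 3; i++ {
            length := int(binary.LittleEndian.Uint16(src[i*2:]))
            if start+length >= len(src) {
                return streams, errors.New("truncated input (or invalid offset)")
            }
            streams[i] = src[start : start+length]
            start += length
        }
        streams[3] = src[start:] // last length is implicit
        return streams, nil
    }

    func main() {
        // 6-byte jump table (lengths 1, 1, 1) followed by 4 one-byte streams.
        blk := []byte{1, 0, 1, 0, 1, 0, 0xA, 0xB, 0xC, 0xD}
        s, err := splitStreams(blk)
        fmt.Println(s, err)
    }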
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 2a06bd1a7e5..04f6529955e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) @@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} } // Decompress1X will decompress a 1X encoded stream. @@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { dt := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 for br.off >= 8 { @@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { bitsLeft -= nBits dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) 
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,195 +721,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -916,12 +764,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. 
- const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -942,8 +790,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -951,8 +799,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -960,8 +808,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -969,8 +817,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { @@ -987,8 +835,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -996,8 +844,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1005,8 +853,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1014,25 +862,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + 
copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1040,23 +889,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1076,7 +933,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1084,16 +942,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -1135,12 +999,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -1150,104 +1014,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 
8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1257,21 +1126,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1291,7 +1166,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... 
- if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1299,16 +1175,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 00000000000..3415e5da226 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,148 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// that uses an asm implementation of its main loop. +package huff0 + +import ( + "errors" + "fmt" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr0 *bitReaderShifted + pbr1 *bitReaderShifted + pbr2 *bitReaderShifted + pbr3 *bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr0: &br[0], + pbr1: &br[1], + pbr2: &br[2], + pbr3: &br[3], + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000000..06287f56859 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,662 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
+ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $8-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 32(AX), SI + MOVQ 40(AX), DI + MOVQ DI, BX + MOVQ 72(AX), CX + MOVQ CX, (SP) + MOVQ 48(AX), R8 + MOVQ 56(AX), R9 + MOVQ (AX), R10 + MOVQ 8(AX), R11 + MOVQ 16(AX), R12 + MOVQ 24(AX), R13 + + // Main loop +main_loop: + MOVQ BX, DI + CMPQ DI, (SP) + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R14 + MOVBQZX 40(R10), R15 + CMPQ R15, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R10), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R10) + ORQ BP, R14 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br0.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R10) + MOVB R15, 40(R10) + ADDQ R8, DI + + // br1.fillFast32() + MOVQ 32(R11), R14 + MOVBQZX 40(R11), R15 + CMPQ R15, $0x20 + JBE skip_fill1 + MOVQ 24(R11), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R11), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R11) + ORQ BP, R14 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br1.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R11) + MOVB R15, 40(R11) + ADDQ R8, DI + + // br2.fillFast32() + MOVQ 32(R12), R14 + MOVBQZX 40(R12), R15 + CMPQ R15, $0x20 + JBE skip_fill2 + MOVQ 24(R12), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R12), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R12) + ORQ BP, R14 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br2.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // 
these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R12) + MOVB R15, 40(R12) + ADDQ R8, DI + + // br3.fillFast32() + MOVQ 32(R13), R14 + MOVBQZX 40(R13), R15 + CMPQ R15, $0x20 + JBE skip_fill3 + MOVQ 24(R13), AX + SUBQ $0x20, R15 + SUBQ $0x04, AX + MOVQ (R13), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(BP*1), BP + MOVQ R15, CX + SHLQ CL, BP + MOVQ AX, 24(R13) + ORQ BP, R14 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R14, BP + MOVQ SI, CX + SHRQ CL, BP + + // v0 := table[val0&mask] + MOVW (R9)(BP*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R14 + ADDB CL, R15 + + // val1 := br3.peekTopBits(peekBits) + MOVQ SI, CX + MOVQ R14, BP + SHRQ CL, BP + + // v1 := table[val1&mask] + MOVW (R9)(BP*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R14 + ADDB CL, R15 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (DI) + + // update the bitrader reader structure + MOVQ R14, 32(R13) + MOVB R15, 40(R13) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + MOVQ 40(AX), CX + MOVQ BX, DX + SUBQ CX, DX + SHLQ $0x02, DX + MOVQ DX, 64(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $16-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 32(CX), BX + MOVQ 40(CX), SI + MOVQ SI, (SP) + MOVQ 72(CX), DX + MOVQ DX, 8(SP) + MOVQ 48(CX), DI + MOVQ 56(CX), R8 + MOVQ (CX), R9 + MOVQ 8(CX), R10 + MOVQ 16(CX), R11 + MOVQ 24(CX), R12 + + // Main loop +main_loop: + MOVQ (SP), SI + CMPQ SI, 8(SP) + SETGE DL + + // br1000.fillFast32() + MOVQ 32(R9), R13 + MOVBQZX 40(R9), R14 + CMPQ R14, $0x20 + JBE skip_fill1000 + MOVQ 24(R9), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R9), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R9) + ORQ BP, R13 + + // exhausted = exhausted || (br1000.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1000: + // val0 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ 
R13, 32(R9) + MOVB R14, 40(R9) + ADDQ DI, SI + + // br1001.fillFast32() + MOVQ 32(R10), R13 + MOVBQZX 40(R10), R14 + CMPQ R14, $0x20 + JBE skip_fill1001 + MOVQ 24(R10), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R10), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R10) + ORQ BP, R13 + + // exhausted = exhausted || (br1001.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1001: + // val0 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R10) + MOVB R14, 40(R10) + ADDQ DI, SI + + // br1002.fillFast32() + MOVQ 32(R11), R13 + MOVBQZX 40(R11), R14 + CMPQ R14, $0x20 + JBE skip_fill1002 + MOVQ 24(R11), R15 + SUBQ $0x20, R14 + SUBQ $0x04, R15 + MOVQ (R11), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R11) + ORQ BP, R13 + + // exhausted = exhausted || (br1002.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1002: + // val0 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R11) + MOVB R14, 40(R11) + ADDQ DI, SI + + // br1003.fillFast32() + MOVQ 32(R12), R13 + MOVBQZX 40(R12), R14 + CMPQ R14, $0x20 + JBE skip_fill1003 + MOVQ 24(R12), R15 + SUBQ $0x20, R14 + 
SUBQ $0x04, R15 + MOVQ (R12), BP + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R15)(BP*1), BP + MOVQ R14, CX + SHLQ CL, BP + MOVQ R15, 24(R12) + ORQ BP, R13 + + // exhausted = exhausted || (br1003.off < 4) + CMPQ R15, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1003: + // val0 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v0 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v1 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v2 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R13 + ADDB CL, R14 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R13, R15 + MOVQ BX, CX + SHRQ CL, R15 + + // v3 := table[val0&mask] + MOVW (R8)(R15*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R13 + ADDB CL, R14 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (SI) + + // update the bitreader reader structure + MOVQ R13, 32(R12) + MOVB R14, 40(R12) + ADDQ $0x04, (SP) + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + MOVQ 40(AX), CX + MOVQ (SP), DX + SUBQ CX, DX + SHLQ $0x02, DX + MOVQ DX, 64(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000000..126b4d68a94 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,193 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
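For reference, the jump-table handling at the top of the generic `Decompress4X` above can be restated as a standalone sketch: the first 6 bytes of a 4X block hold three little-endian uint16 lengths for streams 0..2, and stream 3 takes whatever remains. Function and variable names here are illustrative, not part of the package API:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// splitStreams mirrors the jump-table parsing shown above (sketch only).
func splitStreams(src []byte) (streams [4][]byte, err error) {
	if len(src) < 6 {
		return streams, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(binary.LittleEndian.Uint16(src[i*2:]))
		if start+length >= len(src) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = src[start : start+length]
		start += length
	}
	// The fourth stream is everything after the first three.
	streams[3] = src[start:]
	return streams, nil
}

func main() {
	// 6-byte jump table: streams 0..2 are 1 byte each; stream 3 gets the rest.
	src := []byte{1, 0, 1, 0, 1, 0, 0xaa, 0xbb, 0xcc, 0xdd, 0xee}
	streams, err := splitStreams(src)
	fmt.Println(streams, err)
}
```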
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
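The flush condition in the loop above relies on unsigned wraparound: `off` is a `uint8` and `bufoff` is 256, so `off == 0` after an increment means exactly 256 symbols per stream have been buffered and the four temp buffers must be copied out. A minimal standalone illustration of that pattern:

```go
package main

import "fmt"

func main() {
	// off is a uint8: after 256 increments it wraps to 0, which is the
	// flush signal (bufoff == 256 == len(buf[i]) in the decoder above).
	var off uint8
	flushes := 0
	for i := 0; i < 1024; i++ {
		off += 2 // two symbols decoded per stream per iteration
		if off == 0 {
			flushes++ // here the decoder copies buf[0..3] to the output regions
		}
	}
	fmt.Println("flushes:", flushes) // 8
}
```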
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 3ee00ecb470..e8ad17ad08e 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000000..3954c51219b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. +func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000000..e802579c4f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000000..4465fbe9e90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. 
EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index c8f0f16fc1e..beb7fa87201 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. @@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. `EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. +This function can be called concurrently. +Each call will run on the same goroutine as the caller. Encoded blocks can be concatenated and the result will be the combined input stream. Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. @@ -149,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 8825 22.90 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 @@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66 silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80136201 1152 175.45 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 165609838 50369 36.19 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 9 1911399616 177620386 16175 112.70 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 359753026 5438 335.20 +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. 
https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## 
Decompressor @@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error { } ``` -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. -See "Allocation-less operation" below. +When running with default settings, it is important to use the "Close" function when you no longer need the Reader, +so that its goroutines are stopped. +Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +only as it is requested. + For decoding buffers, it could look something like this: @@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. @@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) { ``` Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. ### Dictionaries @@ -357,62 +369,48 @@ In this case no unneeded allocations should be made. The buffer decoder does everything on the same goroutine and does nothing concurrently. It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. -The stream decoder operates on +The stream decoder will create goroutines that: -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. -* A goroutine coordinates these blocks and sends history from one to the next. +1) Read input and split it into blocks. +2) Decompress literals. +3) Decompress sequences. +4) Reconstruct the output stream. So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. +The concurrency level will, for streams, determine how many blocks ahead the decompression will start. + Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. -In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + ### Benchmarks -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - The first two are streaming decodes and the last are smaller inputs. - + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
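As a concrete illustration of the synchronous mode described in the README hunk above, here is a minimal round trip using the public API; `WithDecoderConcurrency(1)` keeps decompression on the calling goroutine. This is a sketch written for this review, not code taken from the repository:

```go
package main

import (
	"bytes"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress something first so we have a valid stream.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello, zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Synchronous decoding: no async goroutine stages are spawned.
	dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}
```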
+ ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 753d17df634..d7cd15ba29d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -132,6 +133,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index dc587b2c949..b2bca330182 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,14 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,6 +43,9 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 @@ -76,16 +84,25 @@ type blockDec struct { // Window size of the block. WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - err error - decWG sync.WaitGroup + err error + + // Check against this crc + checkCRC []byte // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. 
RLESize uint32 tmp [4]byte @@ -108,13 +125,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -132,11 +144,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize @@ -147,9 +165,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if debugDecoder { @@ -157,7 +175,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -168,9 +198,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { @@ -192,85 +222,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. 
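The `startDecoder` goroutine removed below was fed through the deleted `input`/`history`/`result` channels; the rewrite replaces that handoff with direct method calls on the caller's goroutine. A generic before/after sketch of the pattern being removed, with purely illustrative names:

```go
package main

import "fmt"

type result struct{ n int }

// Old shape: one goroutine per decoder, driven over channels.
func startWorker(input chan int, out chan result) {
	go func() {
		for v := range input {
			out <- result{n: v * 2}
		}
		close(out)
	}()
}

// New shape: a plain synchronous call; no goroutine or channels to manage,
// and nothing to drain or wait for on Close.
func decode(v int) result { return result{n: v * 2} }

func main() {
	input, out := make(chan int, 1), make(chan result, 1)
	startWorker(input, out)
	input <- 21
	close(input)
	fmt.Println(<-out) // {42}

	fmt.Println(decode(21)) // {42}
}
```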
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } } -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -293,14 +252,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -310,30 +278,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -349,7 +305,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -360,7 +316,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
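The 10+10-bit packing noted in the comment above can be made concrete with a short sketch. The `litCompSize` line is an assumption based on the zstd format specification, since that statement falls outside the hunk shown:

```go
package main

import "fmt"

func main() {
	// 3-byte literals header, sizeFormat 01: the low 4 bits of in[0] carry
	// the block type and size format; the remaining 20 bits split 10/10.
	in := []byte{0x45, 0x8f, 0x03}
	n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
	litRegenSize := int(n & 1023)
	litCompSize := int(n >> 10) // assumed: next 10 bits, per the zstd spec
	fmt.Println(litRegenSize, litCompSize)
}
```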
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -371,7 +327,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -381,7 +337,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -392,13 +348,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -406,7 +364,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { @@ -417,7 +375,6 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.literalBuf = make([]byte, litRegenSize) } else { b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } } } @@ -433,7 +390,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] @@ -441,31 +398,65 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = maxCompressedBlockSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { b.literalBuf = make([]byte, 0, litRegenSize) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = maxCompressedBlockSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -474,27 +465,61 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -511,8 +536,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } - var seqs = &sequenceDecs{} + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -541,6 +574,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -548,34 +584,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -585,140 +623,106 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. 
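The treeless-literals handling above hinges on reusing the Huffman table carried in `hist.huffTree`: a table read from one block decodes a later block that omits its own table. A small sketch of that table round trip using the public huff0 API (written for this review under the assumption that `Compress1X` finds the input compressible):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	data := []byte("hello huff0, hello huff0, hello huff0, hello huff0")
	comp, _, err := huff0.Compress1X(data, &huff0.Scratch{})
	if err != nil {
		panic(err) // e.g. ErrIncompressible for unsuitable input
	}
	// ReadTable parses the serialized table and returns the remaining stream;
	// the returned Scratch is what hist.huffTree holds between blocks above.
	dec, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}
	// cap(dst) sets the maximum decompressed size for Decompress1X.
	out, err := dec.Decoder().Decompress1X(make([]byte, 0, len(data)), remain)
	fmt.Println(string(out), err)
}
```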
- - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) return err } - if debugDecoder { - println("History merged ok") + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) } - br := &bitReader{} - if err := br.init(in); err != nil { - return err + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. 
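The restructuring that follows splits sequence handling into two passes: `decode` first fills a `[]seqVals` slice with (literal length, match length, match offset) triples, and `execute` then materializes bytes against the literals and history. A self-contained sketch of the execute half, with illustrative types (the real `seqVals` carries more state):

```go
package main

import "fmt"

type seqVal struct{ ll, ml, mo int } // literal length, match length, match offset

// execute materializes decoded sequences: copy ll literals, then copy ml
// bytes from mo bytes back in the output (byte-by-byte, so overlapping
// matches self-extend correctly).
func execute(seqs []seqVal, lits []byte) []byte {
	var out []byte
	for _, s := range seqs {
		out = append(out, lits[:s.ll]...)
		lits = lits[s.ll:]
		for i := 0; i < s.ml; i++ {
			out = append(out, out[len(out)-s.mo])
		}
	}
	return append(out, lits...) // trailing literals
}

func main() {
	// "abcabcabc": 3 literals, then a 6-byte match at offset 3.
	fmt.Println(string(execute([]seqVal{{ll: 3, ml: 6, mo: 3}}, []byte("abc"))))
}
```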
+ err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6cf85..b80191e4b1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58b572..36119f385c5 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,13 @@ package zstd import ( - "errors" + "bytes" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. 
dicts map[uint32]dict @@ -46,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. 
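Since `DecodeAll` is the buffer-oriented entry point changed in the hunks that follow, a minimal round trip with its encoder counterpart may help orient the reader. This uses only the public API and is not code from this diff; a nil reader/writer is the documented way to use the buffer-only methods:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer: EncodeAll-only usage
	if err != nil {
		panic(err)
	}
	payload := enc.EncodeAll([]byte("hello, DecodeAll"), nil)
	enc.Close()

	dec, err := zstd.NewReader(nil) // nil reader: DecodeAll-only usage
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(payload, nil)
	fmt.Println(string(out), err)
}
```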
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,33 +328,39 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } + if cap(dst) == 0 { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. @@ -368,33 +395,176 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok } + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if !d.o.ignoreChecksum && len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last } return true } +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", 
d.current.d)
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+	}
+}
+
 // Close will release all resources.
 // It is NOT possible to reuse the decoder after this.
 func (d *Decoder) Close() {
@@ -402,10 +572,10 @@ func (d *Decoder) Close() {
 		return
 	}
 	d.drainOutput()
-	if d.stream != nil {
-		close(d.stream)
+	if d.current.cancel != nil {
+		d.current.cancel()
 		d.streamWg.Wait()
-		d.stream = nil
+		d.current.cancel = nil
 	}
 	if d.decoders != nil {
 		close(d.decoders)
@@ -456,100 +626,307 @@ type decodeOutput struct {
 	err error
 }
 
-type decodeStream struct {
-	r io.Reader
-
-	// Blocks ready to be written to output.
-	output chan decodeOutput
-
-	// cancel reading from the input
-	cancel chan struct{}
+func (d *Decoder) startSyncDecoder(r io.Reader) error {
+	d.frame.history.reset()
+	d.syncStream.br = readerWrapper{r: r}
+	d.syncStream.inFrame = false
+	d.syncStream.enabled = true
+	d.syncStream.decodedFrame = 0
+	return nil
 }
 
-// errEndOfStream indicates that everything from the stream was read.
-var errEndOfStream = errors.New("end-of-stream")
-
 // Create Decoder:
-// Spawn n block decoders. These accept tasks to decode a block.
-// Create goroutine that handles stream processing, this will send history to decoders as they are available.
-// Decoders update the history as they decode.
-// When a block is returned:
-// a) history is sent to the next decoder,
-// b) content written to CRC.
-// c) return data to WRITER.
-// d) wait for next block to return data.
-// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+// ASYNC:
+// Spawn 4 goroutines.
+// 0: Read frames and decode blocks.
+// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
+// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
+// 3: Wait for stream history, execute sequences, send stream history.
+func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 	defer d.streamWg.Done()
-	frame := newFrameDec(d.o)
-	for stream := range inStream {
-		if debugDecoder {
-			println("got new stream")
+	br := readerWrapper{r: r}
+
+	var seqPrepare = make(chan *blockDec, d.o.concurrent)
+	var seqDecode = make(chan *blockDec, d.o.concurrent)
+	var seqExecute = make(chan *blockDec, d.o.concurrent)
+
+	// Async 1: Prepare blocks...
+	go func() {
+		var hist history
+		var hasErr bool
+		for block := range seqPrepare {
+			if hasErr {
+				if block != nil {
+					seqDecode <- block
+				}
+				continue
+			}
+			if block.async.newHist != nil {
+				if debugDecoder {
+					println("Async 1: new history")
+				}
+				hist.reset()
+				if block.async.newHist.dict != nil {
+					hist.setDict(block.async.newHist.dict)
+				}
+			}
+			if block.err != nil || block.Type != blockTypeCompressed {
+				hasErr = block.err != nil
+				seqDecode <- block
+				continue
+			}
+
+			remain, err := block.decodeLiterals(block.data, &hist)
+			block.err = err
+			hasErr = block.err != nil
+			if err == nil {
+				block.async.literals = hist.decoders.literals
+				block.async.seqData = remain
+			} else if debugDecoder {
+				println("decodeLiterals error:", err)
+			}
+			seqDecode <- block
 		}
-		br := readerWrapper{r: stream.r}
-	decodeStream:
-		for {
-			frame.history.reset()
-			err := frame.reset(&br)
-			if debugDecoder && err != nil {
-				println("Frame decoder returned", err)
+		close(seqDecode)
+	}()
+
+	// Async 2: Decode sequences...
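+	// The stages form a hand-rolled channel pipeline: each stage keeps its
+	// own stage-local history state, so no locks are needed, and blocks flow
+	// through buffered channels in arrival order. A minimal sketch of the
+	// stage shape, with a hypothetical prepare helper standing in for the
+	// real per-stage work:
+	//
+	//	in := make(chan *blockDec, concurrent)
+	//	next := make(chan *blockDec, concurrent)
+	//	go func() {
+	//		for b := range in {
+	//			prepare(b) // stage-local work
+	//			next <- b  // hand off, preserving order
+	//		}
+	//		close(next)
+	//	}()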
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) } - break + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize } - if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} + seqExecute <- block + } + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
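+	// Stage 3 below is the only stage that appends to the shared frame
+	// buffer, which keeps frame-size (FCS) accounting and CRC-relevant
+	// output single-threaded. An RLE block, for instance, expands to a
+	// plain fill, roughly:
+	//
+	//	dst = dst[:rleSize]
+	//	for i := range dst {
+	//		dst[i] = v // the single repeated byte
+	//	}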
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 3: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) } - break decodeStream - default: } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + seqPrepare <- dec + } + break decodeStream + } + + // Go through all blocks of the frame. 
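+		// d.decoders acts as a lease pool: every *blockDec received from it
+		// must eventually be handed back, either through the pipeline (via
+		// seqPrepare) or by the drain on shutdown. The underlying pattern,
+		// as a sketch with a hypothetical resource type:
+		//
+		//	pool := make(chan *resource, n)
+		//	for i := 0; i < n; i++ {
+		//		pool <- newResource()
+		//	}
+		//	r := <-pool // acquire (blocks while all are leased)
+		//	use(r)
+		//	pool <- r   // release; forgetting this deadlocks acquirers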
+		for {
+			var dec *blockDec
+			select {
+			case <-ctx.Done():
+				break decodeStream
+			case dec = <-d.decoders:
+				// Once we have a decoder, we MUST return it.
+			}
+			err := frame.next(dec)
+			if !historySent {
+				h := frame.history
+				if debugDecoder {
+					println("Alloc History:", h.allocFrameBuffer)
+				}
+				dec.async.newHist = &h
+				dec.async.fcs = frame.FrameContentSize
+				historySent = true
+			} else {
+				dec.async.newHist = nil
+			}
+			if debugDecoder && err != nil {
+				println("next block returned error:", err)
+			}
+			dec.err = err
+			dec.checkCRC = nil
+			if dec.Last && frame.HasCheckSum && err == nil {
+				crc, err := frame.rawInput.readSmall(4)
+				if err != nil {
+					println("CRC missing?", err)
+					dec.err = err
+				}
+				var tmp [4]byte
+				copy(tmp[:], crc)
+				dec.checkCRC = tmp[:]
+				if debugDecoder {
+					println("found crc to check:", dec.checkCRC)
				}
 			}
-			// All blocks have started decoding, check if there are more frames.
-			println("waiting for done")
-			frame.frameDone.Wait()
-			println("done waiting...")
+			err = dec.err
+			last := dec.Last
+			seqPrepare <- dec
+			if err != nil {
+				break decodeStream
+			}
+			if last {
+				break
+			}
 		}
-		frame.frameDone.Wait()
-		println("Sending EOS")
-		stream.output <- decodeOutput{err: errEndOfStream}
 	}
+	close(seqPrepare)
+	wg.Wait()
+	d.frame.history.b = frameHistCache
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 95cc9b8b81f..c70e6fa0f73 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -19,6 +19,7 @@ type decoderOptions struct {
 	maxDecodedSize uint64
 	maxWindowSize  uint64
 	dicts          []dict
+	ignoreChecksum bool
 }
 
 func (o *decoderOptions) setDefault() {
@@ -28,7 +29,10 @@ func (o *decoderOptions) setDefault() {
 		concurrent:    runtime.GOMAXPROCS(0),
 		maxWindowSize: MaxWindowSize,
 	}
-	o.maxDecodedSize = 1 << 63
+	if o.concurrent > 4 {
+		o.concurrent = 4
+	}
+	o.maxDecodedSize = 64 << 30
 }
 
 // WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -37,16 +41,25 @@ func WithDecoderLowmem(b bool) DOption {
 	return func(o *decoderOptions) error { o.lowMem = b; return nil }
 }
 
-// WithDecoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
-// The value supplied must be at least 1.
-// By default this will be set to GOMAXPROCS.
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding blocks with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whichever is lower.
 func WithDecoderConcurrency(n int) DOption {
 	return func(o *decoderOptions) error {
-		if n <= 0 {
+		if n < 0 {
 			return errors.New("concurrency must be at least 1")
 		}
-		o.concurrent = n
+		if n == 0 {
+			o.concurrent = runtime.GOMAXPROCS(0)
+		} else {
+			o.concurrent = n
+		}
 		return nil
 	}
 }
@@ -54,7 +67,7 @@ func WithDecoderConcurrency(n int) DOption {
 // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
 // non-streaming operations or maximum window size for streaming operations.
 // This can be used to control memory usage of potentially hostile content.
-// Maximum and default is 1 << 63 bytes.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
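+//
+// Taken together with the options above, a typical hardened reader looks
+// roughly like this (sketch only; r is a hypothetical compressed input):
+//
+//	dec, err := zstd.NewReader(r,
+//		zstd.WithDecoderConcurrency(0),   // 0 = use GOMAXPROCS
+//		zstd.WithDecoderMaxMemory(1<<30), // refuse to decode more than 1 GiB
+//		zstd.IgnoreChecksum(false),       // keep CRC verification on
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer dec.Close()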
 func WithDecoderMaxMemory(n uint64) DOption {
 	return func(o *decoderOptions) error {
 		if n == 0 {
@@ -100,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
 		return nil
 	}
 }
+
+// IgnoreChecksum allows the caller to forcibly ignore checksum checking.
+func IgnoreChecksum(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.ignoreChecksum = b
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 5f08a283023..f51ab529a0b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
 	// TEMPLATE
 	const hashLog = tableBits
 	// seems global, but would be nice to tweak.
-	const kSearchStrength = 7
+	const kSearchStrength = 6
 
 	// nextEmit is where in src the next emitLiteral should start from.
 	nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 	// TEMPLATE
 	const hashLog = tableBits
 	// seems global, but would be nice to tweak.
-	const kSearchStrength = 8
+	const kSearchStrength = 6
 
 	// nextEmit is where in src the next emitLiteral should start from.
 	nextEmit := s
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index e6e315969b0..dcc987a7cb6 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
 	if cap(s.filling) == 0 {
 		s.filling = make([]byte, 0, e.o.blockSize)
 	}
-	if cap(s.current) == 0 {
-		s.current = make([]byte, 0, e.o.blockSize)
-	}
-	if cap(s.previous) == 0 {
-		s.previous = make([]byte, 0, e.o.blockSize)
+	if e.o.concurrent > 1 {
+		if cap(s.current) == 0 {
+			s.current = make([]byte, 0, e.o.blockSize)
+		}
+		if cap(s.previous) == 0 {
+			s.previous = make([]byte, 0, e.o.blockSize)
+		}
+		s.current = s.current[:0]
+		s.previous = s.previous[:0]
+		if s.writing == nil {
+			s.writing = &blockEnc{lowMem: e.o.lowMem}
+			s.writing.init()
+		}
+		s.writing.initNewEncode()
 	}
 	if s.encoder == nil {
 		s.encoder = e.o.encoder()
 	}
-	if s.writing == nil {
-		s.writing = &blockEnc{lowMem: e.o.lowMem}
-		s.writing.init()
-	}
-	s.writing.initNewEncode()
 	s.filling = s.filling[:0]
-	s.current = s.current[:0]
-	s.previous = s.previous[:0]
 	s.encoder.Reset(e.o.dict, false)
 	s.headerWritten = false
 	s.eofWritten = false
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
 		return s.err
 	}
 
+	// SYNC:
+	if e.o.concurrent == 1 {
+		src := s.filling
+		s.nInput += int64(len(s.filling))
+		if debugEncoder {
+			println("Adding sync block,", len(src), "bytes, final:", final)
+		}
+		enc := s.encoder
+		blk := enc.Block()
+		blk.reset(nil)
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+
+		err := errIncompressible
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+		switch err {
+		case errIncompressible:
+			if debugEncoder {
+				println("Storing incompressible block as raw")
+			}
+			blk.encodeRaw(src)
+			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the offset history.
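+			// A raw block bounds the worst case: the input is stored
+			// verbatim behind a 3-byte block header, so incompressible
+			// data grows only marginally. In sketch form, with a
+			// hypothetical writeRawBlock helper:
+			//
+			//	if len(compressed) >= len(src) {
+			//		writeRawBlock(src) // header + src, no entropy coding
+			//	}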
+ case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 5f2e1d020ee..44d8dbd199a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f8c31..3ff109cce4b 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -8,23 +8,17 @@ import ( "bytes" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { @@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error { } d.history.windowSize = int(d.WindowSize) if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + d.history.allocFrameBuffer = d.history.windowSize * 2 + // TODO: Maybe use FrameContent size } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,62 +272,24 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. 
func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) // We can overwrite upper tmp now want, err := d.rawInput.readSmall(4) @@ -340,6 +298,18 @@ func (d *frameDec) checkCRC() error { return err } + if d.o.ignoreChecksum { + return nil + } + + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + if !bytes.Equal(tmp[:], want) { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) @@ -352,123 +322,17 @@ func (d *frameDec) checkCRC() error { return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. 
- block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err } - block = next } + + return nil } // runDecoder will create a sync decoder that will decode a block of data. @@ -477,8 +341,22 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.FrameContentSize != fcsUnknown { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -489,29 +367,41 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index bb3d4fd6c31..fde4e6b6011 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. 
+ // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d251..28b40153cc2 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) 
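The ensureBlock compaction above keeps the history buffer bounded by discarding everything older than the window. A minimal standalone sketch of that move-down, with buf and window standing in for h.b and h.windowSize:

// compact keeps only the most recent window bytes, reusing buf in place.
func compact(buf []byte, window int) []byte {
	if len(buf) <= window {
		return buf // nothing to discard yet
	}
	discard := len(buf) - window
	copy(buf, buf[discard:]) // slide the tail down to the front
	return buf[:window]
}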
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index bc731e4cb69..e80139dd9c6 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,19 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -93,12 +99,127 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] return nil } +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. 
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + if true { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + } + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +272,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +297,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) 
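+		// Advance past the literals just copied; later sequences and the
+		// final flush consume the remainder.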
s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +350,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +382,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -291,9 +408,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) + } + // Add final literals - s.out = append(s.out, s.literals...) - return nil + s.out = append(out, s.literals...) + return br.close() } // update states, at least 27 bits must be available. @@ -457,36 +579,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. 
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000000..4676b09cc18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,350 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+	if len(s.dict) > 0 {
+		return false, nil
+	}
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+		return false, nil
+	}
+	useSafe := false
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+		useSafe = true
+	}
+	if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+		useSafe = true
+	}
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeSyncAsmContext{
+		llTable:     s.litLengths.fse.dt[:maxTablesize],
+		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:     s.offsets.fse.dt[:maxTablesize],
+		llState:     uint64(s.litLengths.state.state),
+		mlState:     uint64(s.matchLengths.state.state),
+		ofState:     uint64(s.offsets.state.state),
+		iteration:   s.nSeqs - 1,
+		litRemain:   len(s.literals),
+		out:         s.out,
+		outPosition: len(s.out),
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+		history:     hist,
+	}
+
+	s.seqSize = 0
+	startSize := len(s.out)
+
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+		}
+	} else {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+		}
+	}
+	switch errCode {
+	case noError:
+		break
+
+	case errorMatchLenOfsMismatch:
+		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+	case errorMatchLenTooBig:
+		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+	case errorMatchOffTooBig:
+		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+	case errorNotEnoughLiterals:
+		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+			ctx.ll, ctx.litRemain+ctx.ll)
+
+	case errorNotEnoughSpace:
+		size := ctx.outPosition + ctx.ll + ctx.ml
+		if debugDecoder {
+			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+		}
+		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
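+//
+// Two asm variants exist per CPU level: when the worst-case bits read per
+// sequence (the value bits plus all three FSE table logs) fit in 56 bits, a
+// single bit-buffer refill per sequence appears sufficient, so the _56 loop
+// can skip the mid-sequence refill; the lte56bits check below selects it.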
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		}
+
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
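+		// The append above only reserves capacity; the reslice below undoes
+		// the visible growth so the asm routine can fill the spare space.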
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + + ok := sequenceDecs_executeSimple_amd64(&ctx) + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000000..2585b2e988d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,3517 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
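+	// (Roughly mirroring bitReader.fill in Go: when at least 8 input bytes
+	// remain, rewind the read pointer by the whole bytes already consumed
+	// and reload a full 64-bit word; otherwise fall through to the
+	// byte-by-byte refill loop below.)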
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_adjust_end + 
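+	// (The adjust_* blocks below implement zstd's repeat-offset rules:
+	// offset values 1-3 select one of the three recent offsets, shifted by
+	// one when the literal length is zero; see RFC 8878, offset codes.)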
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_adjust_end + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, DI + +sequenceDecs_decode_56_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R8 + +sequenceDecs_decode_56_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R14, $0x00 + JZ sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R14, BX + MOVQ DX, R15 + SHLQ CL, R15 + MOVQ R14, CX + NEGQ CX + SHRQ CL, R15 + ADDQ R15, R9 + +sequenceDecs_decode_56_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA 
sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB 
sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R14 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + ADDQ R15, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + 
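+ // Per the zstd format, when literals_length == 0 an offset_value of 3
+ // selects Repeated_Offset1 minus one, hence the LEAQ -1(R10) below
+ // (R10 holds prevOffset[0] in this function).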
+sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, R11 + JZ copy_1_word + MOVB (SI)(R14*1), R15 + MOVB R15, (BX)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, R11 + JZ copy_1_dword + MOVW (SI)(R14*1), R15 + MOVW R15, (BX)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, R11 + JZ copy_1_qword + MOVL (SI)(R14*1), R15 + MOVL R15, (BX)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, R11 + JZ copy_1_test + MOVQ (SI)(R14*1), R15 + MOVQ R15, (BX)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JGE copy_all_from_history + XORQ R11, R11 + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(R11*1), R12 + MOVB R12, (BX)(R11*1) + ADDQ $0x01, R11 + +copy_4_word: + 
TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(R11*1), R12 + MOVW R12, (BX)(R11*1) + ADDQ $0x02, R11 + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(R11*1), R12 + MOVL R12, (BX)(R11*1) + ADDQ $0x04, R11 + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(R11*1), R12 + MOVQ R12, (BX)(R11*1) + ADDQ $0x08, R11 + JMP copy_4_test + +copy_4: + MOVUPS (R14)(R11*1), X0 + MOVUPS X0, (BX)(R11*1) + ADDQ $0x10, R11 + +copy_4_test: + CMPQ R11, R13 + JB copy_4 + ADDQ R13, DI + ADDQ R13, BX + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, R11 + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (BX)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, R11 + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (BX)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, R11 + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (BX)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, R11 + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (BX)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (BX)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, R11 + JB copy_5 + ADDQ R11, BX + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + 
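+ // The three-entry offset history is contiguous at s+144. The MOVUPS
+ // pair below shifts prevOffset[0] and [1] into slots [1] and [2] and
+ // writes the newly decoded offset into slot [0] with one 16-byte move.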
MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX + MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + 
ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
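+ // In the BMI2 variants, the value updates after this refill replace the
+ // CL shift dance with BEXTRQ (pull the 8-bit "number of bits" field out
+ // of the packed state word) and ROLQ+BZHIQ (rotate the wanted bits to
+ // the bottom, then clear everything above bit n). A schematic Go
+ // equivalent of the peel step (illustrative; not part of this file):
+ //
+ //	import "math/bits"
+ //
+ //	// peek returns the next n bits of value, bitsRead of which are
+ //	// already consumed; n == 0 yields 0, matching BZHI semantics.
+ //	func peek(value uint64, bitsRead, n uint8) uint64 {
+ //		r := bits.RotateLeft64(value, int(bitsRead+n)) // ROLQ
+ //		return r & (uint64(1)<<n - 1)                  // BZHIQ n
+ //	}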
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, R8, R8 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 
144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + 
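+ // (copy_5_* chain) The 1-, 2- and 4-byte steps above and the 8-byte
+ // step below peel the low bits off the copy length, leaving an exact
+ // multiple of 16 for the MOVUPS loop, so the SSE copy never runs past
+ // the requested length. The same idea in Go (illustrative only):
+ //
+ //	func copyBySizeClass(dst, src []byte, n int) {
+ //		i := 0
+ //		for _, step := range []int{1, 2, 4, 8} {
+ //			if n&step != 0 {
+ //				copy(dst[i:i+step], src[i:i+step])
+ //				i += step
+ //			}
+ //		}
+ //		for ; i < n; i += 16 { // exact 16-byte chunks, like MOVUPS
+ //			copy(dst[i:i+16], src[i:i+16])
+ //		}
+ //	}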
+copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, DI + +sequenceDecs_decodeSync_safe_amd64_llState_updateState_skip_zero: + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R8 + +sequenceDecs_decodeSync_safe_amd64_mlState_updateState_skip_zero: + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + CMPQ R13, $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero + MOVQ BX, CX + ADDQ R13, BX + MOVQ DX, R14 + SHLQ CL, R14 + MOVQ R13, CX + NEGQ CX + SHRQ CL, R14 + ADDQ R14, R9 + +sequenceDecs_decodeSync_safe_amd64_ofState_updateState_skip_zero: + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ 
s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, AX + JZ copy_1_word + MOVB (R11)(R14*1), R15 + MOVB R15, (R10)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, AX + JZ copy_1_dword + MOVW (R11)(R14*1), R15 + MOVW R15, (R10)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, AX + JZ copy_1_qword + MOVL (R11)(R14*1), R15 + MOVL R15, (R10)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, AX + JZ copy_1_test + MOVQ (R11)(R14*1), R15 + MOVQ R15, (R10)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JGE copy_all_from_history + XORQ AX, AX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(AX*1), CL + MOVB CL, (R10)(AX*1) + ADDQ $0x01, AX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword + MOVW (R14)(AX*1), CX + MOVW CX, (R10)(AX*1) + ADDQ $0x02, AX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(AX*1), CX + MOVL CX, (R10)(AX*1) + ADDQ $0x04, AX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(AX*1), CX 
+ MOVQ CX, (R10)(AX*1) + ADDQ $0x08, AX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(AX*1), X0 + MOVUPS X0, (R10)(AX*1) + ADDQ $0x10, AX + +copy_4_test: + CMPQ AX, R13 + JB copy_4 + ADDQ R13, R12 + ADDQ R13, R10 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, AX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R10)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, AX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R10)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, AX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R10)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, AX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R10)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R10)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, AX + JB copy_5 + ADDQ AX, R10 + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (AX)(CX*1), R14 + MOVB R14, (R10)(CX*1) + ADDQ $0x01, CX + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (AX)(CX*1), R14 + MOVW R14, (R10)(CX*1) + ADDQ $0x02, CX + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (AX)(CX*1), R14 + MOVL R14, (R10)(CX*1) + ADDQ $0x04, CX + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (AX)(CX*1), R14 + MOVQ R14, (R10)(CX*1) + ADDQ $0x08, CX + JMP copy_2_test + +copy_2: + MOVUPS (AX)(CX*1), X0 + MOVUPS X0, (R10)(CX*1) + ADDQ $0x10, CX + +copy_2_test: + CMPQ CX, R13 + JB copy_2 + ADDQ R13, R10 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT 
·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + + // Update Literal Length State + MOVBQZX SI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, SI, SI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + + // Update Match Length State + MOVBQZX DI, R13 + MOVQ $0x00001010, CX + BEXTRQ CX, DI, DI + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Offset State + MOVBQZX R8, R13 + MOVQ $0x00001010, CX + BEXTRQ 
CX, R8, R8 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + ADDQ R14, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + TESTQ $0x00000001, CX + JZ copy_1_word + MOVB (R10)(R14*1), R15 + MOVB R15, (R9)(R14*1) + ADDQ $0x01, R14 + +copy_1_word: + TESTQ $0x00000002, CX + JZ copy_1_dword + MOVW (R10)(R14*1), R15 + MOVW R15, (R9)(R14*1) + ADDQ $0x02, R14 + +copy_1_dword: + TESTQ $0x00000004, CX + JZ copy_1_qword + MOVL (R10)(R14*1), R15 + MOVL R15, (R9)(R14*1) + ADDQ $0x04, R14 + +copy_1_qword: + TESTQ $0x00000008, CX + JZ copy_1_test + MOVQ (R10)(R14*1), R15 + MOVQ R15, (R9)(R14*1) + ADDQ $0x08, R14 + JMP copy_1_test + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + +copy_1_test: + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JGE copy_all_from_history + XORQ CX, CX + TESTQ $0x00000001, R13 + JZ copy_4_word + MOVB (R14)(CX*1), R12 + MOVB R12, (R9)(CX*1) + ADDQ $0x01, CX + +copy_4_word: + TESTQ $0x00000002, R13 + JZ copy_4_dword
+ MOVW (R14)(CX*1), R12 + MOVW R12, (R9)(CX*1) + ADDQ $0x02, CX + +copy_4_dword: + TESTQ $0x00000004, R13 + JZ copy_4_qword + MOVL (R14)(CX*1), R12 + MOVL R12, (R9)(CX*1) + ADDQ $0x04, CX + +copy_4_qword: + TESTQ $0x00000008, R13 + JZ copy_4_test + MOVQ (R14)(CX*1), R12 + MOVQ R12, (R9)(CX*1) + ADDQ $0x08, CX + JMP copy_4_test + +copy_4: + MOVUPS (R14)(CX*1), X0 + MOVUPS X0, (R9)(CX*1) + ADDQ $0x10, CX + +copy_4_test: + CMPQ CX, R13 + JB copy_4 + ADDQ R13, R11 + ADDQ R13, R9 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + XORQ R15, R15 + TESTQ $0x00000001, CX + JZ copy_5_word + MOVB (R14)(R15*1), BP + MOVB BP, (R9)(R15*1) + ADDQ $0x01, R15 + +copy_5_word: + TESTQ $0x00000002, CX + JZ copy_5_dword + MOVW (R14)(R15*1), BP + MOVW BP, (R9)(R15*1) + ADDQ $0x02, R15 + +copy_5_dword: + TESTQ $0x00000004, CX + JZ copy_5_qword + MOVL (R14)(R15*1), BP + MOVL BP, (R9)(R15*1) + ADDQ $0x04, R15 + +copy_5_qword: + TESTQ $0x00000008, CX + JZ copy_5_test + MOVQ (R14)(R15*1), BP + MOVQ BP, (R9)(R15*1) + ADDQ $0x08, R15 + JMP copy_5_test + +copy_5: + MOVUPS (R14)(R15*1), X0 + MOVUPS X0, (R9)(R15*1) + ADDQ $0x10, R15 + +copy_5_test: + CMPQ R15, CX + JB copy_5 + ADDQ CX, R9 + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + TESTQ R13, R13 + JZ handle_loop + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + XORQ R12, R12 + TESTQ $0x00000001, R13 + JZ copy_2_word + MOVB (CX)(R12*1), R14 + MOVB R14, (R9)(R12*1) + ADDQ $0x01, R12 + +copy_2_word: + TESTQ $0x00000002, R13 + JZ copy_2_dword + MOVW (CX)(R12*1), R14 + MOVW R14, (R9)(R12*1) + ADDQ $0x02, R12 + +copy_2_dword: + TESTQ $0x00000004, R13 + JZ copy_2_qword + MOVL (CX)(R12*1), R14 + MOVL R14, (R9)(R12*1) + ADDQ $0x04, R12 + +copy_2_qword: + TESTQ $0x00000008, R13 + JZ copy_2_test + MOVQ (CX)(R12*1), R14 + MOVQ R14, (R9)(R12*1) + ADDQ $0x08, R12 + JMP copy_2_test + +copy_2: + MOVUPS (CX)(R12*1), X0 + MOVUPS X0, (R9)(R12*1) + ADDQ $0x10, R12 + +copy_2_test: + CMPQ R12, R13 + JB copy_2 + ADDQ R13, R9 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + 
MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000000..c3452bc3a9e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. 
+ if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. 
+ for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 967f29b3120..b53f606a18a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -20,34 +20,49 @@ const ZipMethodPKWare = 20 var zipReaderPool sync.Pool -// newZipReader cannot be used since we would leak goroutines... -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -57,7 +72,7 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err @@ -111,12 +126,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. -func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index ef1d49a009c..c1c90b4a072 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -39,6 +39,9 @@ const zstdMinMatch = 3 // Reset the buffer offset when reaching this. const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. 
+const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -52,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -75,6 +82,10 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go index 45ddad236f3..242f82cc72a 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -4,7 +4,6 @@ package mountinfo import ( - "fmt" "os" "path/filepath" @@ -33,13 +32,13 @@ func mountedByStat(path string) (bool, error) { func normalizePath(path string) (realPath string, err error) { if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + return "", err } if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + return "", err } if _, err := os.Stat(realPath); err != nil { - return "", fmt.Errorf("failed to stat target of %q: %w", path, err) + return "", err } return realPath, nil } diff --git a/vendor/github.com/nxadm/tail/.gitignore b/vendor/github.com/nxadm/tail/.gitignore new file mode 100644 index 00000000000..35d9351d385 --- /dev/null +++ b/vendor/github.com/nxadm/tail/.gitignore @@ -0,0 +1,3 @@ +.idea/ +.test/ +examples/_* \ No newline at end of file diff --git a/vendor/github.com/nxadm/tail/CHANGES.md b/vendor/github.com/nxadm/tail/CHANGES.md new file mode 100644 index 00000000000..224e54b44de --- /dev/null +++ b/vendor/github.com/nxadm/tail/CHANGES.md @@ -0,0 +1,56 @@ +# Version v1.4.7-v1.4.8 +* Documentation updates. +* Small linter cleanups. +* Added example in test. + +# Version v1.4.6 + +* Document the usage of Cleanup when re-reading a file (thanks to @lesovsky) for issue #18. +* Add example directories with examples and tests for issues. + +# Version v1.4.4-v1.4.5 + +* Fix of checksum problem because of forced tag. No changes to the code. + +# Version v1.4.1 + +* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag". + +# Version v1.4.0 + +* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail". + +# Version v1.3.1 + +* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer", +fixes upstream issue 93. + + +# Version v1.3.0 + +* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num +to Line struct". + +# Version v1.2.1 + +* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able +code in readme".
+* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change +to comment wording". +* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed +spurious newlines from log messages". + +# Version v1.2.0 + +* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the + problem for never return the last line if it's not followed by a newline". +* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove +deprecated os.SEEK consts". The changes bumped the minimal supported Go +release to 1.9. + +# Version v1.1.0 + +* Migration to go modules. +* Release of the master branch of the dormant upstream, because it contains +fixes and improvements not present in the tagged release. + diff --git a/vendor/github.com/nxadm/tail/Dockerfile b/vendor/github.com/nxadm/tail/Dockerfile new file mode 100644 index 00000000000..d9633891c00 --- /dev/null +++ b/vendor/github.com/nxadm/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/ +ADD . $GOPATH/src/github.com/nxadm/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/nxadm/tail + +# expecting to run the test successfully. +RUN go test -v github.com/nxadm/tail + +# expecting to install successfully +RUN go install -v github.com/nxadm/tail +RUN go install -v github.com/nxadm/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/nxadm/tail/LICENSE similarity index 100% rename from vendor/github.com/hpcloud/tail/LICENSE.txt rename to vendor/github.com/nxadm/tail/LICENSE diff --git a/vendor/github.com/nxadm/tail/README.md b/vendor/github.com/nxadm/tail/README.md new file mode 100644 index 00000000000..f47939c749d --- /dev/null +++ b/vendor/github.com/nxadm/tail/README.md @@ -0,0 +1,44 @@ +![ci](https://github.com/nxadm/tail/workflows/ci/badge.svg)[![Go Reference](https://pkg.go.dev/badge/github.com/nxadm/tail.svg)](https://pkg.go.dev/github.com/nxadm/tail) + +# tail functionality in Go + +nxadm/tail provides a Go library that emulates the features of the BSD `tail` +program. The library comes with full support for truncation/move detection as +it is designed to work with log rotation tools. The library works on all +operating systems supported by Go, including POSIX systems like Linux and +*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported. + +A simple example: + +```Go +// Create a tail +t, err := tail.TailFile( + "/var/log/nginx.log", tail.Config{Follow: true, ReOpen: true}) +if err != nil { + panic(err) +} + +// Print the text of each received line +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](https://pkg.go.dev/github.com/nxadm/tail). + +## Installing + + go get github.com/nxadm/tail/... + +## History + +This project is an active, drop-in replacement for the +[abandoned](https://en.wikipedia.org/wiki/HPE_Helion) Go tail library at +[hpcloud](https://github.com/hpcloud/tail). In addition to +[addressing open issues/PRs of the original project](https://github.com/nxadm/tail/issues/6), +nxadm/tail continues the development by keeping up to date with the Go toolchain +(e.g. go modules) and dependencies, completing the documentation, adding features +and fixing bugs. + +## Examples +Examples, e.g. those used to debug an issue, are kept in the [examples directory](/examples).
\ No newline at end of file diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/nxadm/tail/ratelimiter/Licence similarity index 100% rename from vendor/github.com/hpcloud/tail/ratelimiter/Licence rename to vendor/github.com/nxadm/tail/ratelimiter/Licence diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go similarity index 100% rename from vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go rename to vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/nxadm/tail/ratelimiter/memory.go similarity index 81% rename from vendor/github.com/hpcloud/tail/ratelimiter/memory.go rename to vendor/github.com/nxadm/tail/ratelimiter/memory.go index 8f6a5784a9a..bf3c2131b1e 100644 --- a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go +++ b/vendor/github.com/nxadm/tail/ratelimiter/memory.go @@ -5,7 +5,10 @@ import ( "time" ) -const GC_SIZE int = 100 +const ( + GC_SIZE int = 100 + GC_PERIOD time.Duration = 60 * time.Second +) type Memory struct { store map[string]LeakyBucket @@ -44,11 +47,10 @@ func (m *Memory) GarbageCollect() { now := time.Now() // rate limit GC to once per minute - if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { - + if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() { for key, bucket := range m.store { // if the bucket is drained, then GC - if bucket.DrainedAt().Unix() > now.Unix() { + if bucket.DrainedAt().Unix() < now.Unix() { delete(m.store, key) } } diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/nxadm/tail/ratelimiter/storage.go similarity index 100% rename from vendor/github.com/hpcloud/tail/ratelimiter/storage.go rename to vendor/github.com/nxadm/tail/ratelimiter/storage.go diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/nxadm/tail/tail.go similarity index 68% rename from vendor/github.com/hpcloud/tail/tail.go rename to vendor/github.com/nxadm/tail/tail.go index 2d252d60572..37ea4411e9d 100644 --- a/vendor/github.com/hpcloud/tail/tail.go +++ b/vendor/github.com/nxadm/tail/tail.go @@ -1,6 +1,12 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. +//nxadm/tail provides a Go library that emulates the features of the BSD `tail` +//program. The library comes with full support for truncation/move detection as +//it is designed to work with log rotation tools. The library works on all +//operating systems supported by Go, including POSIX systems like Linux and +//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported. package tail import ( @@ -15,31 +21,38 @@ import ( "sync" "time" - "github.com/hpcloud/tail/ratelimiter" - "github.com/hpcloud/tail/util" - "github.com/hpcloud/tail/watch" + "github.com/nxadm/tail/ratelimiter" + "github.com/nxadm/tail/util" + "github.com/nxadm/tail/watch" "gopkg.in/tomb.v1" ) var ( - ErrStop = fmt.Errorf("tail should now stop") + // ErrStop is returned when the tail of a file has been marked to be stopped. 
+ ErrStop = errors.New("tail should now stop") ) type Line struct { - Text string - Time time.Time - Err error // Error from tail + Text string // The contents of the line + Num int // The line number + SeekInfo SeekInfo // SeekInfo of the line + Time time.Time // Present time + Err error // Error from tail } -// NewLine returns a Line with present time. -func NewLine(text string) *Line { - return &Line{text, time.Now(), nil} +// Deprecated: this function is no longer used internally and it has little or no +// use in the API. As such, it will be removed from the API in a future major +// release. +// +// NewLine returns a pointer to a Line struct. +func NewLine(text string, lineNum int) *Line { + return &Line{text, lineNum, SeekInfo{}, time.Now(), nil} } -// SeekInfo represents arguments to `os.Seek` +// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek type SeekInfo struct { Offset int64 - Whence int // os.SEEK_* + Whence int } type logger interface { @@ -57,29 +70,32 @@ type logger interface { // Config is used to specify how a file must be tailed. type Config struct { // File-specific - Location *SeekInfo // Seek to this location before tailing - ReOpen bool // Reopen recreated files (tail -F) - MustExist bool // Fail early if the file does not exist - Poll bool // Poll for file changes instead of using inotify - Pipe bool // Is a named pipe (mkfifo) - RateLimiter *ratelimiter.LeakyBucket + Location *SeekInfo // Tail from this location. If nil, start at the beginning of the file + ReOpen bool // Reopen recreated files (tail -F) + MustExist bool // Fail early if the file does not exist + Poll bool // Poll for file changes instead of using the default inotify + Pipe bool // The file is a named pipe (mkfifo) // Generic IO Follow bool // Continue looking for new lines (tail -f) MaxLineSize int // If non-zero, split longer lines into multiple lines - // Logger, when nil, is set to tail.DefaultLogger - // To disable logging: set field to tail.DiscardingLogger + // Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function) + RateLimiter *ratelimiter.LeakyBucket + + // Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger. + // To disable logging, set it to tail.DiscardingLogger Logger logger } type Tail struct { - Filename string - Lines chan *Line - Config + Filename string // The filename + Lines chan *Line // A consumable channel of *Line + Config // Tail.Configuration - file *os.File - reader *bufio.Reader + file *os.File + reader *bufio.Reader + lineNum int watcher watch.FileWatcher changes *watch.FileChanges @@ -90,16 +106,17 @@ type Tail struct { } var ( - // DefaultLogger is used when Config.Logger == nil + // DefaultLogger logs to os.Stderr and is used when Config.Logger == nil DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) // DiscardingLogger can be used to disable logging output DiscardingLogger = log.New(ioutil.Discard, "", 0) ) -// TailFile begins tailing the file. Output stream is made available -// via the `Tail.Lines` channel. To handle errors during tailing, -// invoke the `Wait` or `Err` method after finishing reading from the -// `Lines` channel. +// TailFile begins tailing the file and returns a pointer to a Tail struct +// and an error. An output stream is made available via the Tail.Lines +// channel (e.g. to be looped over and printed). To handle errors during tailing, +// after finishing reading from the Lines channel, invoke the `Wait` or `Err` +// method on the returned *Tail.
func TailFile(filename string, config Config) (*Tail, error) { if config.ReOpen && !config.Follow { util.Fatal("cannot set ReOpen without Follow.") @@ -113,7 +130,7 @@ func TailFile(filename string, config Config) (*Tail, error) { // when Logger was not specified in config, use default logger if t.Logger == nil { - t.Logger = log.New(os.Stderr, "", log.LstdFlags) + t.Logger = DefaultLogger } if t.Poll { @@ -135,15 +152,14 @@ func TailFile(filename string, config Config) (*Tail, error) { return t, nil } -// Return the file's current position, like stdio's ftell(). -// But this value is not very accurate. -// it may readed one line in the chan(tail.Lines), -// so it may lost one line. +// Tell returns the file's current position, like stdio's ftell(), and an error. +// Beware that this value may not be completely accurate, because one line from +// the tail.Lines channel may have been read already. func (tail *Tail) Tell() (offset int64, err error) { if tail.file == nil { return } - offset, err = tail.file.Seek(0, os.SEEK_CUR) + offset, err = tail.file.Seek(0, io.SeekCurrent) if err != nil { return } @@ -164,7 +180,8 @@ func (tail *Tail) Stop() error { return tail.Wait() } -// StopAtEOF stops tailing as soon as the end of the file is reached. +// StopAtEOF stops tailing as soon as the end of the file is reached. The function +// returns an error. func (tail *Tail) StopAtEOF() error { tail.Kill(errStopAtEOF) return tail.Wait() @@ -186,6 +203,7 @@ func (tail *Tail) closeFile() { func (tail *Tail) reopen() error { tail.closeFile() + tail.lineNum = 0 for { var err error tail.file, err = OpenFile(tail.Filename) @@ -241,7 +259,6 @@ func (tail *Tail) tailFileSync() { // Seek to requested location on first open of the file. if tail.Location != nil { _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) - tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location) if err != nil { tail.Killf("Seek error on %s: %s", tail.Filename, err) return @@ -250,16 +267,12 @@ func (tail *Tail) tailFileSync() { tail.openReader() - var offset int64 = 0 - var err error - // Read line by line. for { // do not seek in named pipes if !tail.Pipe { // grab the position in case we need to back up in the event of a half-line - offset, err = tail.Tell() - if err != nil { + if _, err := tail.Tell(); err != nil { tail.Kill(err) return } @@ -273,10 +286,9 @@ func (tail *Tail) tailFileSync() { if cooloff { // Wait a second before seeking till the end of // file when rate limit is reached. - msg := fmt.Sprintf( - "Too much log activity; waiting a second " + - "before resuming tailing") - tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)} + msg := ("Too much log activity; waiting a second before resuming tailing") + offset, _ := tail.Tell() + tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)} select { case <-time.After(time.Second): case <-tail.Dying(): @@ -296,10 +308,8 @@ func (tail *Tail) tailFileSync() { } if tail.Follow && line != "" { - // this has the potential to never return the last line if - // it's not followed by a newline; seems a fair trade here - err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0}) - if err != nil { + tail.sendLine(line) + if err := tail.seekEnd(); err != nil { tail.Kill(err) return } @@ -337,7 +347,7 @@ func (tail *Tail) tailFileSync() { // reopened if ReOpen is true. Truncated files are always reopened.
func (tail *Tail) waitForChanges() error { if tail.changes == nil { - pos, err := tail.file.Seek(0, os.SEEK_CUR) + pos, err := tail.file.Seek(0, io.SeekCurrent) if err != nil { return err } @@ -361,10 +371,9 @@ func (tail *Tail) waitForChanges() error { tail.Logger.Printf("Successfully reopened %s", tail.Filename) tail.openReader() return nil - } else { - tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) - return ErrStop } + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop case <-tail.changes.Truncated: // Always reopen truncated files (Follow is true) tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) @@ -377,20 +386,21 @@ func (tail *Tail) waitForChanges() error { case <-tail.Dying(): return ErrStop } - panic("unreachable") } func (tail *Tail) openReader() { + tail.lk.Lock() if tail.MaxLineSize > 0 { // add 2 to account for newline characters tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) } else { tail.reader = bufio.NewReader(tail.file) } + tail.lk.Unlock() } func (tail *Tail) seekEnd() error { - return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END}) + return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd}) } func (tail *Tail) seekTo(pos SeekInfo) error { @@ -415,13 +425,19 @@ func (tail *Tail) sendLine(line string) bool { } for _, line := range lines { - tail.Lines <- &Line{line, now, nil} + tail.lineNum++ + offset, _ := tail.Tell() + select { + case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}: + case <-tail.Dying(): + return true + } } if tail.Config.RateLimiter != nil { ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) if !ok { - tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n", + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.", tail.Filename) return false } @@ -433,6 +449,7 @@ func (tail *Tail) sendLine(line string) bool { // Cleanup removes inotify watches added by the tail package. This function is // meant to be invoked from a process's exit handler. The Linux kernel may not // automatically remove inotify watches after the process exits. +// If you plan to re-read a file, don't call Cleanup in between. func (tail *Tail) Cleanup() { watch.Cleanup(tail.Filename) } diff --git a/vendor/github.com/nxadm/tail/tail_posix.go b/vendor/github.com/nxadm/tail/tail_posix.go new file mode 100644 index 00000000000..23e071dea16 --- /dev/null +++ b/vendor/github.com/nxadm/tail/tail_posix.go @@ -0,0 +1,17 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +// +build !windows + +package tail + +import ( + "os" +) + +// Deprecated: this function is only useful internally and, as such, +// it will be removed from the API in a future major release. +// +// OpenFile proxies an os.Open call for a file so it can be correctly tailed +// on POSIX and non-POSIX OSes like MS Windows.
+func OpenFile(name string) (file *os.File, err error) { + return os.Open(name) +} diff --git a/vendor/github.com/nxadm/tail/tail_windows.go b/vendor/github.com/nxadm/tail/tail_windows.go new file mode 100644 index 00000000000..da0d2f39c97 --- /dev/null +++ b/vendor/github.com/nxadm/tail/tail_windows.go @@ -0,0 +1,19 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail +// +build windows + +package tail + +import ( + "os" + + "github.com/nxadm/tail/winfile" +) + +// Deprecated: this function is only useful internally and, as such, +// it will be removed from the API in a future major release. +// +// OpenFile proxies an os.Open call for a file so it can be correctly tailed +// on POSIX and non-POSIX OSes like MS Windows. +func OpenFile(name string) (file *os.File, err error) { + return winfile.OpenFile(name, os.O_RDONLY, 0) +} diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/nxadm/tail/util/util.go similarity index 88% rename from vendor/github.com/hpcloud/tail/util/util.go rename to vendor/github.com/nxadm/tail/util/util.go index 54151fe39f1..b64caa21269 100644 --- a/vendor/github.com/hpcloud/tail/util/util.go +++ b/vendor/github.com/nxadm/tail/util/util.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. @@ -18,7 +19,7 @@ var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} // Fatal is like panic, except it displays only the current goroutine's stack. func Fatal(format string, v ...interface{}) { - // https://github.com/hpcloud/log/blob/master/log.go#L45 + // https://github.com/nxadm/log/blob/master/log.go#L45 LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) os.Exit(1) } diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/nxadm/tail/watch/filechanges.go similarity index 86% rename from vendor/github.com/hpcloud/tail/watch/filechanges.go rename to vendor/github.com/nxadm/tail/watch/filechanges.go index 3ce5dcecbb2..5b65f42ae58 100644 --- a/vendor/github.com/hpcloud/tail/watch/filechanges.go +++ b/vendor/github.com/nxadm/tail/watch/filechanges.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail package watch type FileChanges struct { @@ -8,7 +9,7 @@ type FileChanges struct { func NewFileChanges() *FileChanges { return &FileChanges{ - make(chan bool), make(chan bool), make(chan bool)} + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} } func (fc *FileChanges) NotifyModified() { diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/nxadm/tail/watch/inotify.go similarity index 85% rename from vendor/github.com/hpcloud/tail/watch/inotify.go rename to vendor/github.com/nxadm/tail/watch/inotify.go index 4478f1e1a01..cbd11ad8d03 100644 --- a/vendor/github.com/hpcloud/tail/watch/inotify.go +++ b/vendor/github.com/nxadm/tail/watch/inotify.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
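The filechanges.go hunk above swaps the notification channels from unbuffered to capacity-1. That detail is what lets the watcher publish events without blocking: a send on a full capacity-1 channel can simply be skipped, coalescing bursts of events into one pending notification. Below is a minimal sketch of the pattern, assuming a non-blocking-send helper (the name sendOnlyIfEmpty is illustrative, not necessarily the library's own):

```Go
package main

import "fmt"

// FileChanges mirrors the watcher's shape: one capacity-1 channel per
// event kind, so a pending notification is never lost and a duplicate
// never blocks the sender.
type FileChanges struct {
	Modified, Truncated, Deleted chan bool
}

func NewFileChanges() *FileChanges {
	return &FileChanges{
		make(chan bool, 1), make(chan bool, 1), make(chan bool, 1),
	}
}

// sendOnlyIfEmpty performs a non-blocking send; with a buffer of one,
// the default branch fires only when a notification is already queued,
// so repeated events coalesce instead of stalling the watch goroutine.
func sendOnlyIfEmpty(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}

func main() {
	fc := NewFileChanges()
	sendOnlyIfEmpty(fc.Modified)
	sendOnlyIfEmpty(fc.Modified) // coalesced; does not block
	fmt.Println(<-fc.Modified)   // true
}
```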
@@ -8,9 +9,9 @@ import ( "os" "path/filepath" - "github.com/hpcloud/tail/util" + "github.com/nxadm/tail/util" - "gopkg.in/fsnotify.v1" + "github.com/fsnotify/fsnotify" "gopkg.in/tomb.v1" ) @@ -75,7 +76,6 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange fw.Size = pos go func() { - defer RemoveWatch(fw.Filename) events := Events(fw.Filename) @@ -88,9 +88,11 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange select { case evt, ok = <-events: if !ok { + RemoveWatch(fw.Filename) return } case <-t.Dying(): + RemoveWatch(fw.Filename) return } @@ -99,13 +101,19 @@ func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChange fallthrough case evt.Op&fsnotify.Rename == fsnotify.Rename: + RemoveWatch(fw.Filename) changes.NotifyDeleted() return + // With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod) + case evt.Op&fsnotify.Chmod == fsnotify.Chmod: + fallthrough + case evt.Op&fsnotify.Write == fsnotify.Write: fi, err := os.Stat(fw.Filename) if err != nil { if os.IsNotExist(err) { + RemoveWatch(fw.Filename) changes.NotifyDeleted() return } diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go similarity index 86% rename from vendor/github.com/hpcloud/tail/watch/inotify_tracker.go rename to vendor/github.com/nxadm/tail/watch/inotify_tracker.go index 03be4275ca2..cb9572a0302 100644 --- a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go +++ b/vendor/github.com/nxadm/tail/watch/inotify_tracker.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. @@ -10,9 +11,9 @@ import ( "sync" "syscall" - "github.com/hpcloud/tail/util" + "github.com/nxadm/tail/util" - "gopkg.in/fsnotify.v1" + "github.com/fsnotify/fsnotify" ) type InotifyTracker struct { @@ -83,21 +84,21 @@ func watch(winfo *watchInfo) error { } // RemoveWatch signals the run goroutine to remove the watch for the input filename -func RemoveWatch(fname string) { - remove(&watchInfo{ +func RemoveWatch(fname string) error { + return remove(&watchInfo{ fname: fname, }) } // RemoveWatchCreate signals the run goroutine to remove the create watch for the input filename -func RemoveWatchCreate(fname string) { - remove(&watchInfo{ +func RemoveWatchCreate(fname string) error { + return remove(&watchInfo{ op: fsnotify.Create, fname: fname, }) } -func remove(winfo *watchInfo) { +func remove(winfo *watchInfo) error { // start running the shared InotifyTracker if not already running once.Do(goRun) @@ -108,27 +109,10 @@ func remove(winfo *watchInfo) { delete(shared.done, winfo.fname) close(done) } - - fname := winfo.fname - if winfo.isCreate() { - // Watch for new files to be created in the parent directory. - fname = filepath.Dir(fname) - } - shared.watchNums[fname]-- - watchNum := shared.watchNums[fname] - if watchNum == 0 { - delete(shared.watchNums, fname) - } shared.mux.Unlock() - // If we were the last ones to watch this file, unsubscribe from inotify. - // This needs to happen after releasing the lock because fsnotify waits - // synchronously for the kernel to acknowledge the removal of the watch - // for this file, which causes us to deadlock if we still held the lock.
- if watchNum == 0 { - shared.watcher.Remove(fname) - } shared.remove <- winfo + return <-shared.error } // Events returns a channel to which FileEvents corresponding to the input filename @@ -142,8 +126,8 @@ func Events(fname string) <-chan fsnotify.Event { } // Cleanup removes the watch for the input filename if necessary. -func Cleanup(fname string) { - RemoveWatch(fname) +func Cleanup(fname string) error { + return RemoveWatch(fname) } // watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating @@ -154,6 +138,8 @@ func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { if shared.chans[winfo.fname] == nil { shared.chans[winfo.fname] = make(chan fsnotify.Event) + } + if shared.done[winfo.fname] == nil { shared.done[winfo.fname] = make(chan bool) } @@ -163,47 +149,50 @@ func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { fname = filepath.Dir(fname) } + var err error // already in inotify watch - if shared.watchNums[fname] > 0 { - shared.watchNums[fname]++ - if winfo.isCreate() { - shared.watchNums[winfo.fname]++ - } - return nil + if shared.watchNums[fname] == 0 { + err = shared.watcher.Add(fname) } - - err := shared.watcher.Add(fname) if err == nil { shared.watchNums[fname]++ - if winfo.isCreate() { - shared.watchNums[winfo.fname]++ - } } return err } // removeWatch calls fsnotify.RemoveWatch for the input filename and closes the // corresponding events channel. -func (shared *InotifyTracker) removeWatch(winfo *watchInfo) { +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error { shared.mux.Lock() - defer shared.mux.Unlock() ch := shared.chans[winfo.fname] - if ch == nil { - return + if ch != nil { + delete(shared.chans, winfo.fname) + close(ch) } - delete(shared.chans, winfo.fname) - close(ch) - - if !winfo.isCreate() { - return + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() - shared.watchNums[winfo.fname]-- - if shared.watchNums[winfo.fname] == 0 { - delete(shared.watchNums, winfo.fname) + var err error + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + err = shared.watcher.Remove(fname) } + + return err } // sendEvent sends the input event to the appropriate Tail. @@ -238,7 +227,7 @@ func (shared *InotifyTracker) run() { shared.error <- shared.addWatch(winfo) case winfo := <-shared.remove: - shared.removeWatch(winfo) + shared.error <- shared.removeWatch(winfo) case event, open := <-shared.watcher.Events: if !open { diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/nxadm/tail/watch/polling.go similarity index 96% rename from vendor/github.com/hpcloud/tail/watch/polling.go rename to vendor/github.com/nxadm/tail/watch/polling.go index 49491f21dbf..74e10aa427c 100644 --- a/vendor/github.com/hpcloud/tail/watch/polling.go +++ b/vendor/github.com/nxadm/tail/watch/polling.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
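The inotify_tracker.go rework above is, at heart, a lock-ordering fix: the watch reference count is updated while holding the tracker mutex, but the synchronous watcher.Remove call is issued only after Unlock, because fsnotify waits for the kernel to acknowledge the removal and would deadlock if it needed the same lock. A stripped-down sketch of that discipline (the refTracker type and its fields are illustrative, not the tracker's actual names):

```Go
package main

import (
	"fmt"
	"sync"
)

type refTracker struct {
	mu   sync.Mutex
	refs map[string]int
}

// removeRef decrements the per-file refcount under the lock, then runs
// the potentially blocking removal outside it. Performing the blocking
// call while still holding mu is what used to cause the deadlock.
func (t *refTracker) removeRef(name string, remove func(string) error) error {
	t.mu.Lock()
	t.refs[name]--
	last := t.refs[name] == 0
	if last {
		delete(t.refs, name)
	}
	t.mu.Unlock() // release before the blocking call

	if last {
		return remove(name) // stand-in for shared.watcher.Remove(name)
	}
	return nil
}

func main() {
	t := &refTracker{refs: map[string]int{"/var/log/app.log": 1}}
	err := t.removeRef("/var/log/app.log", func(name string) error {
		fmt.Println("inotify watch dropped for", name)
		return nil
	})
	fmt.Println("err:", err)
}
```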
@@ -8,7 +9,7 @@ import ( "runtime" "time" - "github.com/hpcloud/tail/util" + "github.com/nxadm/tail/util" "gopkg.in/tomb.v1" ) diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/nxadm/tail/watch/watch.go similarity index 91% rename from vendor/github.com/hpcloud/tail/watch/watch.go rename to vendor/github.com/nxadm/tail/watch/watch.go index 2e1783ef0aa..2b5112805a1 100644 --- a/vendor/github.com/hpcloud/tail/watch/watch.go +++ b/vendor/github.com/nxadm/tail/watch/watch.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/nxadm/tail/winfile/winfile.go similarity index 97% rename from vendor/github.com/hpcloud/tail/winfile/winfile.go rename to vendor/github.com/nxadm/tail/winfile/winfile.go index aa7e7bc5df5..4562ac7c25c 100644 --- a/vendor/github.com/hpcloud/tail/winfile/winfile.go +++ b/vendor/github.com/nxadm/tail/winfile/winfile.go @@ -1,3 +1,4 @@ +// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // +build windows package winfile diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go index 98ccc51655c..5a68a3cf394 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go @@ -289,7 +289,13 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err entry.Path = fmt.Sprintf("/dev/char/%d:%d", rule.Major, rule.Minor) } } - deviceAllowList = append(deviceAllowList, entry) + // systemd will issue a warning if the path we give here doesn't exist. + // Since all of this logic is best-effort anyway (we manually set these + // rules separately to systemd) we can safely skip entries that don't + // have a corresponding path. + if _, err := os.Stat(entry.Path); err == nil { + deviceAllowList = append(deviceAllowList, entry) + } } properties = append(properties, newProp("DeviceAllow", deviceAllowList)) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go index 3e547e282b5..bb87ae83aef 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go @@ -2,6 +2,7 @@ package systemd import ( "context" + "errors" "fmt" "sync" @@ -80,8 +81,6 @@ func (d *dbusConnManager) resetConnection(conn *systemdDbus.Conn) { } } -var errDbusConnClosed = dbus.ErrClosed.Error() - // retryOnDisconnect calls op, and if the error it returns is about closed dbus // connection, the connection is re-established and the op is retried. This helps // with the situation when dbus is restarted and we have a stale connection. 
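The dbus.go hunk that follows replaces string-matching on the closed-connection error with errors.Is against the dbus.ErrClosed sentinel, which also matches wrapped errors. Reconstructed from the hunk, the retry loop behaves roughly like this sketch (connManager and errClosed are stand-ins for the runc types and the godbus sentinel):

```Go
package main

import (
	"errors"
	"fmt"
)

// errClosed stands in for dbus.ErrClosed.
var errClosed = errors.New("connection closed by user")

type conn struct{}

type connManager struct{ c *conn }

func (m *connManager) getConnection() (*conn, error) {
	if m.c == nil {
		m.c = &conn{}
	}
	return m.c, nil
}

func (m *connManager) resetConnection(c *conn) { m.c = nil }

// retryOnDisconnect returns on success or on any error other than a
// closed connection; otherwise it drops the stale connection and retries.
func (m *connManager) retryOnDisconnect(op func(*conn) error) error {
	for {
		c, err := m.getConnection()
		if err != nil {
			return err
		}
		err = op(c)
		if err == nil {
			return nil
		}
		if !errors.Is(err, errClosed) {
			return err
		}
		m.resetConnection(c)
	}
}

func main() {
	m := &connManager{}
	calls := 0
	err := m.retryOnDisconnect(func(c *conn) error {
		calls++
		if calls == 1 {
			// Wrapped errors still match via errors.Is.
			return fmt.Errorf("op failed: %w", errClosed)
		}
		return nil
	})
	fmt.Println(calls, err) // 2 <nil>
}
```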
@@ -92,7 +91,10 @@ func (d *dbusConnManager) retryOnDisconnect(op func(*systemdDbus.Conn) error) er return err } err = op(conn) - if !isDbusError(err, errDbusConnClosed) { + if err == nil { + return nil + } + if !errors.Is(err, dbus.ErrClosed) { return err } d.resetConnection(conn) diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 6a7a91e5596..c0e8794482c 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -15,7 +15,7 @@ type Spec struct { // Mounts configures additional mounts (on top of Root). Mounts []Mount `json:"mounts,omitempty"` // Hooks configures callbacks for container lifecycle events. - Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"` + Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"` // Annotations contains arbitrary metadata for the container. Annotations map[string]string `json:"annotations,omitempty"` @@ -27,6 +27,8 @@ type Spec struct { Windows *Windows `json:"windows,omitempty" platform:"windows"` // VM specifies configuration for virtual-machine-based containers. VM *VM `json:"vm,omitempty" platform:"vm"` + // ZOS is platform-specific configuration for z/OS based containers. + ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } // Process contains information to start a specific application inside the container. @@ -49,7 +51,7 @@ type Process struct { // Capabilities are Linux capabilities that are kept for the process. Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` // Rlimits specifies rlimit options to apply to the process. - Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"` + Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` // ApparmorProfile specifies the apparmor profile for the container. @@ -86,11 +88,11 @@ type Box struct { // User specifies specific user (and group) information for the container process. type User struct { // UID is the user id. - UID uint32 `json:"uid" platform:"linux,solaris"` + UID uint32 `json:"uid" platform:"linux,solaris,zos"` // GID is the group id. - GID uint32 `json:"gid" platform:"linux,solaris"` + GID uint32 `json:"gid" platform:"linux,solaris,zos"` // Umask is the umask for the init process. - Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"` + Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"` // AdditionalGids are additional group ids set for the container's process. AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` // Username is the user name. @@ -110,7 +112,7 @@ type Mount struct { // Destination is the absolute path where the mount will be placed in the container. Destination string `json:"destination"` // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris"` + Type string `json:"type,omitempty" platform:"linux,solaris,zos"` // Source specifies the source path of the mount. Source string `json:"source,omitempty"` // Options are fstab style mount options. @@ -178,7 +180,7 @@ type Linux struct { // MountLabel specifies the selinux context for the mounts in the container. 
MountLabel string `json:"mountLabel,omitempty"` // IntelRdt contains Intel Resource Director Technology (RDT) information for - // handling resource constraints (e.g., L3 cache, memory bandwidth) for the container + // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` // Personality contains configuration for the Linux personality syscall Personality *LinuxPersonality `json:"personality,omitempty"` @@ -683,8 +685,9 @@ type LinuxSyscall struct { Args []LinuxSeccompArg `json:"args,omitempty"` } -// LinuxIntelRdt has container runtime resource constraints for Intel RDT -// CAT and MBA features which introduced in Linux 4.10 and 4.12 kernel +// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA +// features and flags enabling Intel RDT CMT and MBM features. +// Intel RDT features are available in Linux 4.14 and newer kernel versions. type LinuxIntelRdt struct { // The identity for RDT Class of Service ClosID string `json:"closID,omitempty"` @@ -697,4 +700,36 @@ type LinuxIntelRdt struct { // The unit of memory bandwidth is specified in "percentages" by // default, and in "MBps" if MBA Software Controller is enabled. MemBwSchema string `json:"memBwSchema,omitempty"` + + // EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of + // the last-level cache (LLC) occupancy for the container. + EnableCMT bool `json:"enableCMT,omitempty"` + + // EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of + // total and local memory bandwidth for the container. + EnableMBM bool `json:"enableMBM,omitempty"` +} + +// ZOS contains platform-specific configuration for z/OS based containers. +type ZOS struct { + // Devices are a list of device nodes that are created for the container + Devices []ZOSDevice `json:"devices,omitempty"` +} + +// ZOSDevice represents the mknod information for a z/OS special device file +type ZOSDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. + Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // GID of the device.
+ GID *uint32 `json:"gid,omitempty"` } diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/config.go b/vendor/github.com/opencontainers/runtime-tools/generate/config.go index f68bdde3735..48f281d286b 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/config.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/config.go @@ -123,6 +123,13 @@ func (g *Generator) initConfigLinuxResourcesPids() { } } +func (g *Generator) initConfigLinuxResourcesUnified() { + g.initConfigLinuxResources() + if g.Config.Linux.Resources.Unified == nil { + g.Config.Linux.Resources.Unified = map[string]string{} + } +} + func (g *Generator) initConfigSolaris() { g.initConfig() if g.Config.Solaris == nil { @@ -185,24 +192,3 @@ func (g *Generator) initConfigVM() { g.Config.VM = &rspec.VM{} } } - -func (g *Generator) initConfigVMHypervisor() { - g.initConfigVM() - if &g.Config.VM.Hypervisor == nil { - g.Config.VM.Hypervisor = rspec.VMHypervisor{} - } -} - -func (g *Generator) initConfigVMKernel() { - g.initConfigVM() - if &g.Config.VM.Kernel == nil { - g.Config.VM.Kernel = rspec.VMKernel{} - } -} - -func (g *Generator) initConfigVMImage() { - g.initConfigVM() - if &g.Config.VM.Image == nil { - g.Config.VM.Image = rspec.VMImage{} - } -} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go index b8433ce8f76..be1f027bf6a 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go @@ -182,7 +182,7 @@ func New(os string) (generator Generator, err error) { Destination: "/dev", Type: "tmpfs", Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + Options: []string{"nosuid", "noexec", "strictatime", "mode=755", "size=65536k"}, }, { Destination: "/dev/pts", @@ -604,6 +604,12 @@ func (g *Generator) SetLinuxCgroupsPath(path string) { g.Config.Linux.CgroupsPath = path } +// SetLinuxIntelRdtClosID sets g.Config.Linux.IntelRdt.ClosID +func (g *Generator) SetLinuxIntelRdtClosID(clos string) { + g.initConfigLinuxIntelRdt() + g.Config.Linux.IntelRdt.ClosID = clos +} + // SetLinuxIntelRdtL3CacheSchema sets g.Config.Linux.IntelRdt.L3CacheSchema func (g *Generator) SetLinuxIntelRdtL3CacheSchema(schema string) { g.initConfigLinuxIntelRdt() @@ -851,6 +857,28 @@ func (g *Generator) DropLinuxResourcesHugepageLimit(pageSize string) { } } +// SetLinuxResourcesUnified sets g.Config.Linux.Resources.Unified +func (g *Generator) SetLinuxResourcesUnified(unified map[string]string) { + g.initConfigLinuxResourcesUnified() + for k, v := range unified { + g.Config.Linux.Resources.Unified[k] = v + } +} + +// AddLinuxResourcesUnified adds or updates a key-value pair in g.Config.Linux.Resources.Unified +func (g *Generator) AddLinuxResourcesUnified(key, val string) { + g.initConfigLinuxResourcesUnified() + g.Config.Linux.Resources.Unified[key] = val +} + +// DropLinuxResourcesUnified drops a key-value pair from g.Config.Linux.Resources.Unified +func (g *Generator) DropLinuxResourcesUnified(key string) { + if g.Config == nil || g.Config.Linux == nil || g.Config.Linux.Resources == nil || g.Config.Linux.Resources.Unified == nil { + return + } + delete(g.Config.Linux.Resources.Unified, key) +} + // SetLinuxResourcesMemoryLimit sets g.Config.Linux.Resources.Memory.Limit.
 func (g *Generator) SetLinuxResourcesMemoryLimit(limit int64) {
 	g.initConfigLinuxResourcesMemory()
@@ -1025,10 +1053,9 @@ func (g *Generator) ClearPreStartHooks() {
 }
 
 // AddPreStartHook adds a prestart hook into g.Config.Hooks.Prestart.
-func (g *Generator) AddPreStartHook(preStartHook rspec.Hook) error {
+func (g *Generator) AddPreStartHook(preStartHook rspec.Hook) {
 	g.initConfigHooks()
 	g.Config.Hooks.Prestart = append(g.Config.Hooks.Prestart, preStartHook)
-	return nil
 }
 
 // ClearPostStopHooks clears g.Config.Hooks.Poststop.
@@ -1040,10 +1067,9 @@ func (g *Generator) ClearPostStopHooks() {
 }
 
 // AddPostStopHook adds a poststop hook into g.Config.Hooks.Poststop.
-func (g *Generator) AddPostStopHook(postStopHook rspec.Hook) error {
+func (g *Generator) AddPostStopHook(postStopHook rspec.Hook) {
 	g.initConfigHooks()
 	g.Config.Hooks.Poststop = append(g.Config.Hooks.Poststop, postStopHook)
-	return nil
 }
 
 // ClearPostStartHooks clears g.Config.Hooks.Poststart.
@@ -1055,10 +1081,9 @@ func (g *Generator) ClearPostStartHooks() {
 }
 
 // AddPostStartHook adds a poststart hook into g.Config.Hooks.Poststart.
-func (g *Generator) AddPostStartHook(postStartHook rspec.Hook) error {
+func (g *Generator) AddPostStartHook(postStartHook rspec.Hook) {
 	g.initConfigHooks()
 	g.Config.Hooks.Poststart = append(g.Config.Hooks.Poststart, postStartHook)
-	return nil
 }
 
 // AddMount adds a mount into g.Config.Mounts.
@@ -1560,12 +1585,8 @@ func (g *Generator) RemoveLinuxResourcesDevice(allow bool, devType string, major
 			return
 		}
 	}
-	return
 }
 
-// strPtr returns the pointer pointing to the string s.
-func strPtr(s string) *string { return &s }
-
 // SetSyscallAction adds rules for syscalls with the specified action
 func (g *Generator) SetSyscallAction(arguments seccomp.SyscallOpts) error {
 	g.initConfigLinuxSeccomp()
@@ -1691,14 +1712,14 @@ func (g *Generator) SetVMHypervisorPath(path string) error {
 	if !strings.HasPrefix(path, "/") {
 		return fmt.Errorf("hypervisorPath %v is not an absolute path", path)
 	}
-	g.initConfigVMHypervisor()
+	g.initConfigVM()
 	g.Config.VM.Hypervisor.Path = path
 	return nil
 }
 
 // SetVMHypervisorParameters sets g.Config.VM.Hypervisor.Parameters
 func (g *Generator) SetVMHypervisorParameters(parameters []string) {
-	g.initConfigVMHypervisor()
+	g.initConfigVM()
 	g.Config.VM.Hypervisor.Parameters = parameters
 }
 
@@ -1707,14 +1728,14 @@ func (g *Generator) SetVMKernelPath(path string) error {
 	if !strings.HasPrefix(path, "/") {
 		return fmt.Errorf("kernelPath %v is not an absolute path", path)
 	}
-	g.initConfigVMKernel()
+	g.initConfigVM()
 	g.Config.VM.Kernel.Path = path
 	return nil
 }
 
 // SetVMKernelParameters sets g.Config.VM.Kernel.Parameters
 func (g *Generator) SetVMKernelParameters(parameters []string) {
-	g.initConfigVMKernel()
+	g.initConfigVM()
 	g.Config.VM.Kernel.Parameters = parameters
 }
 
@@ -1723,7 +1744,7 @@ func (g *Generator) SetVMKernelInitRD(initrd string) error {
 	if !strings.HasPrefix(initrd, "/") {
 		return fmt.Errorf("kernelInitrd %v is not an absolute path", initrd)
 	}
-	g.initConfigVMKernel()
+	g.initConfigVM()
 	g.Config.VM.Kernel.InitRD = initrd
 	return nil
 }
@@ -1733,7 +1754,7 @@ func (g *Generator) SetVMImagePath(path string) error {
 	if !strings.HasPrefix(path, "/") {
 		return fmt.Errorf("imagePath %v is not an absolute path", path)
 	}
-	g.initConfigVMImage()
+	g.initConfigVM()
 	g.Config.VM.Image.Path = path
 	return nil
 }
@@ -1749,7 +1770,7 @@ func (g *Generator) SetVMImageFormat(format string) error {
 	default:
 		return fmt.Errorf("Commonly supported formats are: raw, qcow2, vdi, vmdk, vhd")
 	}
-	g.initConfigVMImage()
+ g.initConfigVM() g.Config.VM.Image.Format = format return nil } diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go index eade5718e0a..f28d8f58752 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/consts.go @@ -4,9 +4,4 @@ const ( seccompOverwrite = "overwrite" seccompAppend = "append" nothing = "nothing" - kill = "kill" - trap = "trap" - trace = "trace" - allow = "allow" - errno = "errno" ) diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go index 311587437fd..93472fba072 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package seccomp diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go index 589b81c1680..b8c1bc26e27 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/seccomp_default_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package seccomp diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go index dbf2aec1c0c..5e84653a94e 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/seccomp/syscall_compare.go @@ -92,22 +92,6 @@ func identical(config1, config2 *rspec.LinuxSyscall) bool { return reflect.DeepEqual(config1, config2) } -func identicalExceptAction(config1, config2 *rspec.LinuxSyscall) bool { - samename := sameName(config1, config2) - sameAction := sameAction(config1, config2) - sameArgs := sameArgs(config1, config2) - - return samename && !sameAction && sameArgs -} - -func identicalExceptArgs(config1, config2 *rspec.LinuxSyscall) bool { - samename := sameName(config1, config2) - sameAction := sameAction(config1, config2) - sameArgs := sameArgs(config1, config2) - - return samename && sameAction && !sameArgs -} - func sameName(config1, config2 *rspec.LinuxSyscall) bool { return reflect.DeepEqual(config1.Names, config2.Names) } diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go index 9c3710529c5..2d3d42bce2a 100644 --- a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go +++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go @@ -131,9 +131,8 @@ func JSONSchemaURL(version string) (url string, err error) { if err != nil { return "", specerror.NewError(specerror.SpecVersionInSemVer, err, rspec.Version) } - configRenamedToConfigSchemaVersion, err := semver.Parse("1.0.0-rc2") // config.json became config-schema.json in 1.0.0-rc2 - if ver.Compare(configRenamedToConfigSchemaVersion) == -1 { - return "", fmt.Errorf("unsupported configuration version (older than %s)", 
configRenamedToConfigSchemaVersion) + if ver.LT(semver.Version{Major: 1, Minor: 0, Patch: 2}) { + return "", errors.New("unsupported configuration version (older than 1.0.2)") } return fmt.Sprintf(configSchemaTemplate, version), nil } diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go index dcefafae70f..6f1b28218da 100644 --- a/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go +++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package validate diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate_unsupported.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate_unsupported.go index f150c326c1d..313ec3995c5 100644 --- a/vendor/github.com/opencontainers/runtime-tools/validate/validate_unsupported.go +++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package validate diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml index 97b530b4fd7..ff6afee5a0f 100644 --- a/vendor/github.com/openshift/imagebuilder/.travis.yml +++ b/vendor/github.com/openshift/imagebuilder/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - "1.9" - - "1.10" + - "1.16" + - "1.17" install: diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index df52699046b..71dc41ea560 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -44,6 +44,7 @@ type Run struct { type Executor interface { Preserve(path string) error EnsureContainerPath(path string) error + EnsureContainerPathAs(path, user string, mode *os.FileMode) error Copy(excludes []string, copies ...Copy) error Run(run Run, config docker.Config) error UnrecognizedInstruction(step *Step) error @@ -61,6 +62,15 @@ func (logExecutor) EnsureContainerPath(path string) error { return nil } +func (logExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + if mode != nil { + log.Printf("ENSURE %s AS %q with MODE=%q", path, user, *mode) + } else { + log.Printf("ENSURE %s AS %q", path, user) + } + return nil +} + func (logExecutor) Copy(excludes []string, copies ...Copy) error { for _, c := range copies { log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s, chmod %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod) @@ -88,6 +98,10 @@ func (noopExecutor) EnsureContainerPath(path string) error { return nil } +func (noopExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return nil +} + func (noopExecutor) Copy(excludes []string, copies ...Copy) error { return nil } @@ -303,6 +317,9 @@ type Builder struct { PendingCopies []Copy Warnings []string + // Raw platform string specified with `FROM --platform` of the stage + // It's up to the implementation or client to parse and use this field + Platform string } func NewBuilder(args map[string]string) *Builder { @@ -375,7 +392,7 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error { } if len(b.RunConfig.WorkingDir) > 0 { - if err := exec.EnsureContainerPath(b.RunConfig.WorkingDir); err != nil { + if err := exec.EnsureContainerPathAs(b.RunConfig.WorkingDir, b.RunConfig.User, nil); err != nil { return err } } @@ -470,7 
+487,7 @@ func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error {
 	b.RunConfig.Env = nil
 
 	// Check to see if we have a default PATH, note that windows won't
-	// have one as its set by HCS
+	// have one as it's set by HCS
 	if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") {
 		b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv)
 	}
diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go
index 0d82136e704..c437e4c2382 100644
--- a/vendor/github.com/openshift/imagebuilder/dispatchers.go
+++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go
@@ -234,6 +234,22 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri
 			return fmt.Errorf("Windows does not support FROM scratch")
 		}
 	}
+	defaultArgs := envMapAsSlice(builtinBuildArgs)
+	userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env)
+	userArgs = mergeEnv(defaultArgs, userArgs)
+	for _, a := range flagArgs {
+		arg, err := ProcessWord(a, userArgs)
+		if err != nil {
+			return err
+		}
+		switch {
+		case strings.HasPrefix(arg, "--platform="):
+			platformString := strings.TrimPrefix(arg, "--platform=")
+			b.Platform = platformString
+		default:
+			return fmt.Errorf("FROM only supports the --platform flag")
+		}
+	}
 	b.RunConfig.Image = name
 	// TODO: handle onbuild
 	return nil
@@ -573,65 +589,63 @@ var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"}
 // to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
 // Dockerfile author may optionally set a default value of this variable.
 func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error {
-	if len(args) != 1 {
-		return fmt.Errorf("ARG requires exactly one argument definition")
-	}
-
 	var (
 		name       string
 		value      string
 		hasDefault bool
 	)
 
-	arg := args[0]
-	// 'arg' can just be a name or name-value pair. Note that this is different
-	// from 'env' that handles the split of name and value at the parser level.
-	// The reason for doing it differently for 'arg' is that we support just
-	// defining an arg and not assign it a value (while 'env' always expects a
-	// name-value pair). If possible, it will be good to harmonize the two.
-	if strings.Contains(arg, "=") {
-		parts := strings.SplitN(arg, "=", 2)
-		name = parts[0]
-		value = parts[1]
-		hasDefault = true
-		if name == "TARGETPLATFORM" {
-			p, err := platforms.Parse(value)
-			if err != nil {
-				return fmt.Errorf("error parsing TARGETPLATFORM argument")
-			}
-			for _, val := range targetArgs {
-				b.AllowedArgs[val] = true
-			}
-			b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture
-			b.Args["TARGETOS"] = p.OS
-			b.Args["TARGETARCH"] = p.Architecture
-			b.Args["TARGETVARIANT"] = p.Variant
-			if p.Variant != "" {
-				b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant
+	for _, argument := range args {
+		arg := argument
+		// 'arg' can just be a name or name-value pair. Note that this is different
+		// from 'env' that handles the split of name and value at the parser level.
+		// The reason for doing it differently for 'arg' is that we support just
+		// defining an arg and not assign it a value (while 'env' always expects a
+		// name-value pair). If possible, it will be good to harmonize the two.
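+		//
+		// With this loop, a single instruction can now declare several build
+		// args at once; e.g. the illustrative Dockerfile line `ARG FOO=1 BAR`
+		// defines FOO with a default of "1" and BAR with no default.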
+ if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + name = parts[0] + value = parts[1] + hasDefault = true + if name == "TARGETPLATFORM" { + p, err := platforms.Parse(value) + if err != nil { + return fmt.Errorf("error parsing TARGETPLATFORM argument") + } + for _, val := range targetArgs { + b.AllowedArgs[val] = true + } + b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture + b.Args["TARGETOS"] = p.OS + b.Args["TARGETARCH"] = p.Architecture + b.Args["TARGETVARIANT"] = p.Variant + if p.Variant != "" { + b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant + } } + } else if val, ok := builtinBuildArgs[arg]; ok { + name = arg + value = val + hasDefault = true + } else { + name = arg + hasDefault = false } - } else if val, ok := builtinBuildArgs[arg]; ok { - name = arg - value = val - hasDefault = true - } else { - name = arg - hasDefault = false - } - // add the arg to allowed list of build-time args from this step on. - b.AllowedArgs[name] = true + // add the arg to allowed list of build-time args from this step on. + b.AllowedArgs[name] = true - // If there is still no default value, a value can be assigned from the heading args - if val, ok := b.HeadingArgs[name]; ok && !hasDefault { - b.Args[name] = val - } + // If there is still no default value, a value can be assigned from the heading args + if val, ok := b.HeadingArgs[name]; ok && !hasDefault { + b.Args[name] = val + } - // If there is a default value associated with this arg then add it to the - // b.buildArgs, later default values for the same arg override earlier ones. - // The args passed to builder (UserArgs) override the default value of 'arg' - // Don't add them here as they were already set in NewBuilder. - if _, ok := b.UserArgs[name]; !ok && hasDefault { - b.Args[name] = value + // If there is a default value associated with this arg then add it to the + // b.buildArgs, later default values for the same arg override earlier ones. + // The args passed to builder (UserArgs) override the default value of 'arg' + // Don't add them here as they were already set in NewBuilder. + if _, ok := b.UserArgs[name]; !ok && hasDefault { + b.Args[name] = value + } } return nil diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec index 79d16ec398d..143bfc355e7 100644 --- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec +++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec @@ -12,7 +12,7 @@ # %global golang_version 1.8.1 -%{!?version: %global version 1.2.2-dev} +%{!?version: %global version 1.2.4-dev} %{!?release: %global release 1} %global package_name imagebuilder %global product_name Container Image Builder diff --git a/vendor/github.com/openshift/imagebuilder/internals.go b/vendor/github.com/openshift/imagebuilder/internals.go index f33dc70bbf7..d4b95390381 100644 --- a/vendor/github.com/openshift/imagebuilder/internals.go +++ b/vendor/github.com/openshift/imagebuilder/internals.go @@ -52,7 +52,7 @@ func hasSlash(input string) bool { // makeAbsolute ensures that the provided path is absolute. func makeAbsolute(dest, workingDir string) string { - // Twiddle the destination when its a relative path - meaning, make it + // Twiddle the destination when it's a relative path - meaning, make it // relative to the WORKINGDIR if dest == "." 
{ if !hasSlash(workingDir) { diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf deleted file mode 100644 index 8074ce80a12..00000000000 --- a/vendor/github.com/openshift/imagebuilder/vendor.conf +++ /dev/null @@ -1,25 +0,0 @@ -github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 -github.com/containerd/containerd v1.3.0 -github.com/containers/storage v1.2 -github.com/docker/docker b68221c37ee597950364788204546f9c9d0e46a1 -github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e -github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e -github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git -github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080 -github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e -github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8 -github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 -github.com/opencontainers/go-digest ac19fd6e7483ff933754af248d80be865e543d22 -github.com/opencontainers/image-spec 243ea084a44451d27322fed02b682d99e2af3ba9 -github.com/opencontainers/runc 923a8f8a9a07aceada5fc48c4d37e905d9b019b5 -github.com/pkg/errors 27936f6d90f9c8e1145f11ed52ffffbfdb9e0af7 -github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac -github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb -golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 -golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 -golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba -k8s.io/klog 8e90cee79f823779174776412c13478955131846 -google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 -github.com/golang/protobuf v1.2.0 -google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 - diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go index 30572ea87ff..6a6acfd9ab3 100644 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go @@ -56,6 +56,9 @@ func (v *GVariant) native() *C.GVariant { } func (v *GVariant) Ptr() unsafe.Pointer { + if v == nil { + return nil + } return v.ptr } diff --git a/vendor/github.com/mtrmac/gpgme/.appveyor.yml b/vendor/github.com/proglottis/gpgme/.appveyor.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.appveyor.yml rename to vendor/github.com/proglottis/gpgme/.appveyor.yml diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.gitignore rename to vendor/github.com/proglottis/gpgme/.gitignore diff --git a/vendor/github.com/mtrmac/gpgme/.travis.yml b/vendor/github.com/proglottis/gpgme/.travis.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.travis.yml rename to vendor/github.com/proglottis/gpgme/.travis.yml diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/proglottis/gpgme/LICENSE similarity index 100% rename from vendor/github.com/mtrmac/gpgme/LICENSE rename to vendor/github.com/proglottis/gpgme/LICENSE diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/proglottis/gpgme/README.md similarity index 100% rename from vendor/github.com/mtrmac/gpgme/README.md rename to vendor/github.com/proglottis/gpgme/README.md diff 
--git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/callbacks.go
rename to vendor/github.com/proglottis/gpgme/callbacks.go
diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/data.go
rename to vendor/github.com/proglottis/gpgme/data.go
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c
similarity index 100%
rename from vendor/github.com/mtrmac/gpgme/go_gpgme.c
rename to vendor/github.com/proglottis/gpgme/go_gpgme.c
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h
similarity index 92%
rename from vendor/github.com/mtrmac/gpgme/go_gpgme.h
rename to vendor/github.com/proglottis/gpgme/go_gpgme.h
index d4826ab368e..eb3a4ba88b1 100644
--- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h
+++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h
@@ -6,11 +6,6 @@
 #include <gpgme.h>
 
-/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */
-#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402
-typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */
-#endif
-
 extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
 extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
 extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go
similarity index 96%
rename from vendor/github.com/mtrmac/gpgme/gpgme.go
rename to vendor/github.com/proglottis/gpgme/gpgme.go
index c19b9aebc5c..9833057a663 100644
--- a/vendor/github.com/mtrmac/gpgme/gpgme.go
+++ b/vendor/github.com/proglottis/gpgme/gpgme.go
@@ -53,13 +53,13 @@ const (
 
 type PinEntryMode int
 
-// const ( // Unavailable in 1.3.2
-// 	PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
-// 	PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
-// 	PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
-// 	PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
-// 	PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
-// )
+const (
+	PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
+	PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
+	PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
+	PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
+	PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
+)
 
 type EncryptFlag uint
 
@@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode {
 	return res
 }
 
-// Unavailable in 1.3.2:
-// func (c *Context) SetPinEntryMode(m PinEntryMode) error {
-// 	err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
-// 	runtime.KeepAlive(c)
-// 	return err
-// }
+func (c *Context) SetPinEntryMode(m PinEntryMode) error {
+	err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
+	runtime.KeepAlive(c)
+	return err
+}
 
-// Unavailable in 1.3.2:
-// func (c *Context) PinEntryMode() PinEntryMode {
-// 	res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
-// 	runtime.KeepAlive(c)
-// 	return res
-// }
+func (c *Context) PinEntryMode() PinEntryMode {
+	res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
+	runtime.KeepAlive(c)
+	return res
+}
 
 func (c *Context) SetCallback(callback Callback) error {
 	var err error
diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go
b/vendor/github.com/proglottis/gpgme/unset_agent_info.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info.go diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go deleted file mode 100644 index 5d74cae493a..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/api.go +++ /dev/null @@ -1,39 +0,0 @@ -package api - -import "net" - -const ( - // Version of the REST API, not implementation version. - // See openapi.yaml for the definition. - Version = "1.1.1" -) - -// ErrorJSON is returned with "application/json" content type and non-2XX status code -type ErrorJSON struct { - Message string `json:"message"` -} - -// Info is the structure returned by `GET /info` -type Info struct { - APIVersion string `json:"apiVersion"` // REST API version - Version string `json:"version"` // Implementation version - StateDir string `json:"stateDir"` - ChildPID int `json:"childPID"` - NetworkDriver *NetworkDriverInfo `json:"networkDriver,omitempty"` - PortDriver *PortDriverInfo `json:"portDriver,omitempty"` -} - -// NetworkDriverInfo in Info -type NetworkDriverInfo struct { - Driver string `json:"driver"` - DNS []net.IP `json:"dns,omitempty"` - ChildIP net.IP `json:"childIP,omitempty"` // since API v1.1.1 (RootlessKit v0.14.1) - DynamicChildIP bool `json:"dynamicChildIP,omitempty"` // since API v1.1.1 -} - -// PortDriverInfo in Info -type PortDriverInfo struct { - Driver string `json:"driver"` - Protos []string `json:"protos"` - DisallowLoopbackChildIP bool `json:"disallowLoopbackChildIP,omitempty"` // since API v1.1.1 -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml b/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml deleted file mode 100644 index dffc496808a..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/api/openapi.yaml +++ /dev/null @@ -1,171 +0,0 @@ -# When you made a change to this YAML, please validate with https://editor.swagger.io -openapi: 3.0.3 -info: - version: 1.1.1 - title: RootlessKit API -servers: - - url: 'http://rootlesskit/v1' - description: Local UNIX socket server. The host part of the URL is ignored. -paths: -# /info: API >= 1.1.0 - /info: - get: - responses: - '200': - description: Info. Available since API 1.1.0. - content: - application/json: - schema: - $ref: '#/components/schemas/Info' - /ports: - get: - responses: - '200': - description: An array of PortStatus - content: - application/json: - schema: - $ref: '#/components/schemas/PortStatuses' - post: - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PortSpec' - responses: - '201': - description: PortStatus with ID - content: - application/json: - schema: - $ref: '#/components/schemas/PortStatus' - '/ports/{id}': - delete: - parameters: - - name: id - in: path - required: true - schema: - type: integer - format: int64 - responses: - '200': - description: Null response -components: - schemas: - Proto: - type: string - description: "protocol for listening. Corresponds to Go's net.Listen. 
The strings with \"4\" and \"6\" suffixes were introduced in API 1.1.0." - enum: - - tcp - - tcp4 - - tcp6 - - udp - - udp4 - - udp6 - - sctp - - sctp4 - - sctp6 - PortSpec: - required: - - proto - properties: - proto: - $ref: '#/components/schemas/Proto' - parentIP: - type: string - parentPort: - type: integer - format: int32 - minimum: 1 - maximum: 65535 - childIP: - type: string -# future version may support requests with parentPort<=0 for automatic port assignment - childPort: - type: integer - format: int32 - minimum: 1 - maximum: 65535 - PortStatus: - required: - - id - properties: - id: - type: integer - format: int64 - spec: - $ref: '#/components/schemas/PortSpec' - PortStatuses: - type: array - items: - $ref: '#/components/schemas/PortStatus' -# Info: API >= 1.1.0 - Info: - required: - - apiVersion - - version - - stateDir - - childPID - properties: - apiVersion: - type: string - description: "API version, without \"v\" prefix" - example: "1.1.0" - version: - type: string - description: "Implementation version, without \"v\" prefix" - example: "0.42.0-beta.1+dev" - stateDir: - type: string - description: "state dir" - example: "/run/user/1000/rootlesskit" - childPID: - type: integer - description: "child PID" - example: 10042 - networkDriver: - $ref: '#/components/schemas/NetworkDriverInfo' - portDriver: - $ref: '#/components/schemas/PortDriverInfo' - NetworkDriverInfo: - required: - - driver - properties: - driver: - type: string - description: "network driver. Empty when --net=host." - example: "slirp4netns" -# TODO: return TAP info - dns: - type: array - description: "DNS addresses" - items: - type: string - example: ["10.0.2.3"] - childIP: - type: string - description: "Child IP (v4)" - example: "10.0.2.100" - dynamicChildIP: - type: boolean - description: "Child IP may change" - PortDriverInfo: - required: - - driver - - supportedProtos - properties: - driver: - type: string - description: "port driver" - example: "builtin" - protos: - type: array - description: "The supported protocol strings for listening ports" - example: ["tcp","udp"] - items: - $ref: '#/components/schemas/Proto' - disallowLoopbackChildIP: - type: boolean - description: "If this field is set to true, loopback IP such as 127.0.0.1 cannot be specified as a child IP" diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/msgutil/msgutil.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/msgutil/msgutil.go deleted file mode 100644 index a0a0c94c638..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/msgutil/msgutil.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package msgutil provides utility for JSON message with uint32le header -package msgutil - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "io" - - "github.com/pkg/errors" -) - -const ( - maxLength = 1 << 16 -) - -func MarshalToWriter(w io.Writer, x interface{}) (int, error) { - b, err := json.Marshal(x) - if err != nil { - return 0, err - } - if len(b) > maxLength { - return 0, errors.Errorf("bad message length: %d (max: %d)", len(b), maxLength) - } - h := make([]byte, 4) - binary.LittleEndian.PutUint32(h, uint32(len(b))) - return w.Write(append(h, b...)) -} - -func UnmarshalFromReader(r io.Reader, x interface{}) (int, error) { - hdr := make([]byte, 4) - n, err := r.Read(hdr) - if err != nil { - return n, err - } - if n != 4 { - return n, errors.Errorf("read %d bytes, expected 4 bytes", n) - } - bLen := binary.LittleEndian.Uint32(hdr) - if bLen > maxLength || bLen < 1 { - return n, errors.Errorf("bad message 
length: %d (max: %d)", bLen, maxLength) - } - b := make([]byte, bLen) - n, err = r.Read(b) - if err != nil { - return 4 + n, err - } - if n != int(bLen) { - return 4 + n, errors.Errorf("read %d bytes, expected %d bytes", n, bLen) - } - return 4 + n, json.Unmarshal(b, x) -} - -func Marshal(x interface{}) ([]byte, error) { - var b bytes.Buffer - _, err := MarshalToWriter(&b, x) - return b.Bytes(), err -} - -func Unmarshal(b []byte, x interface{}) error { - n, err := UnmarshalFromReader(bytes.NewReader(b), x) - if n != len(b) { - return errors.Errorf("read %d bytes, expected %d bytes", n, len(b)) - } - return err -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/builtin.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/builtin.go deleted file mode 100644 index ca3f10b26db..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/builtin.go +++ /dev/null @@ -1,14 +0,0 @@ -package builtin - -import ( - "io" - - "github.com/rootless-containers/rootlesskit/pkg/port" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/child" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent" -) - -var ( - NewParentDriver func(logWriter io.Writer, stateDir string) (port.ParentDriver, error) = parent.NewDriver - NewChildDriver func(logWriter io.Writer) port.ChildDriver = child.NewDriver -) diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go deleted file mode 100644 index 05dc0303cd6..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/child/child.go +++ /dev/null @@ -1,158 +0,0 @@ -package child - -import ( - "io" - "net" - "os" - "strconv" - "strings" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" - - "github.com/rootless-containers/rootlesskit/pkg/msgutil" - "github.com/rootless-containers/rootlesskit/pkg/port" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg" - opaquepkg "github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque" -) - -func NewDriver(logWriter io.Writer) port.ChildDriver { - return &childDriver{ - logWriter: logWriter, - } -} - -type childDriver struct { - logWriter io.Writer -} - -func (d *childDriver) RunChildDriver(opaque map[string]string, quit <-chan struct{}) error { - socketPath := opaque[opaquepkg.SocketPath] - if socketPath == "" { - return errors.New("socket path not set") - } - childReadyPipePath := opaque[opaquepkg.ChildReadyPipePath] - if childReadyPipePath == "" { - return errors.New("child ready pipe path not set") - } - childReadyPipeW, err := os.OpenFile(childReadyPipePath, os.O_WRONLY, os.ModeNamedPipe) - if err != nil { - return err - } - ln, err := net.ListenUnix("unix", &net.UnixAddr{ - Name: socketPath, - Net: "unix", - }) - if err != nil { - return err - } - // write nothing, just close - if err = childReadyPipeW.Close(); err != nil { - return err - } - stopAccept := make(chan struct{}, 1) - go func() { - <-quit - stopAccept <- struct{}{} - ln.Close() - }() - for { - c, err := ln.AcceptUnix() - if err != nil { - select { - case <-stopAccept: - return nil - default: - } - return err - } - go func() { - if rerr := d.routine(c); rerr != nil { - rep := msg.Reply{ - Error: rerr.Error(), - } - msgutil.MarshalToWriter(c, &rep) - } - c.Close() - }() - } - return nil -} - -func (d *childDriver) routine(c *net.UnixConn) error { - var req msg.Request - if _, err := 
msgutil.UnmarshalFromReader(c, &req); err != nil { - return err - } - switch req.Type { - case msg.RequestTypeInit: - return d.handleConnectInit(c, &req) - case msg.RequestTypeConnect: - return d.handleConnectRequest(c, &req) - default: - return errors.Errorf("unknown request type %q", req.Type) - } -} - -func (d *childDriver) handleConnectInit(c *net.UnixConn, req *msg.Request) error { - _, err := msgutil.MarshalToWriter(c, nil) - return err -} - -func (d *childDriver) handleConnectRequest(c *net.UnixConn, req *msg.Request) error { - switch req.Proto { - case "tcp": - case "tcp4": - case "tcp6": - case "udp": - case "udp4": - case "udp6": - default: - return errors.Errorf("unknown proto: %q", req.Proto) - } - // dialProto does not need "4", "6" suffix - dialProto := strings.TrimSuffix(strings.TrimSuffix(req.Proto, "6"), "4") - var dialer net.Dialer - ip := req.IP - if ip == "" { - ip = "127.0.0.1" - } else { - p := net.ParseIP(ip) - if p == nil { - return errors.Errorf("invalid IP: %q", ip) - } - ip = p.String() - } - targetConn, err := dialer.Dial(dialProto, net.JoinHostPort(ip, strconv.Itoa(req.Port))) - if err != nil { - return err - } - defer targetConn.Close() // no effect on duplicated FD - targetConnFiler, ok := targetConn.(filer) - if !ok { - return errors.Errorf("unknown target connection: %+v", targetConn) - } - targetConnFile, err := targetConnFiler.File() - if err != nil { - return err - } - defer targetConnFile.Close() - oob := unix.UnixRights(int(targetConnFile.Fd())) - f, err := c.File() - if err != nil { - return err - } - defer f.Close() - for { - err = unix.Sendmsg(int(f.Fd()), []byte("dummy"), oob, nil, 0) - if err != unix.EINTR { - break - } - } - return err -} - -// filer is implemented by *net.TCPConn and *net.UDPConn -type filer interface { - File() (f *os.File, err error) -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go deleted file mode 100644 index a60d99bd990..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg/msg.go +++ /dev/null @@ -1,140 +0,0 @@ -package msg - -import ( - "net" - "time" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" - - "github.com/rootless-containers/rootlesskit/pkg/msgutil" - "github.com/rootless-containers/rootlesskit/pkg/port" -) - -const ( - RequestTypeInit = "init" - RequestTypeConnect = "connect" -) - -// Request and Response are encoded as JSON with uint32le length header. -type Request struct { - Type string // "init" or "connect" - Proto string // "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6" - IP string - Port int -} - -// Reply may contain FD as OOB -type Reply struct { - Error string -} - -// Initiate sends "init" request to the child UNIX socket. -func Initiate(c *net.UnixConn) error { - req := Request{ - Type: RequestTypeInit, - } - if _, err := msgutil.MarshalToWriter(c, &req); err != nil { - return err - } - if err := c.CloseWrite(); err != nil { - return err - } - var rep Reply - if _, err := msgutil.UnmarshalFromReader(c, &rep); err != nil { - return err - } - return c.CloseRead() -} - -// ConnectToChild connects to the child UNIX socket, and obtains TCP or UDP socket FD -// that corresponds to the port spec. 
-func ConnectToChild(c *net.UnixConn, spec port.Spec) (int, error) { - req := Request{ - Type: RequestTypeConnect, - Proto: spec.Proto, - Port: spec.ChildPort, - IP: spec.ChildIP, - } - if _, err := msgutil.MarshalToWriter(c, &req); err != nil { - return 0, err - } - if err := c.CloseWrite(); err != nil { - return 0, err - } - oobSpace := unix.CmsgSpace(4) - oob := make([]byte, oobSpace) - var ( - oobN int - err error - ) - for { - _, oobN, _, _, err = c.ReadMsgUnix(nil, oob) - if err != unix.EINTR { - break - } - } - if err != nil { - return 0, err - } - if oobN != oobSpace { - return 0, errors.Errorf("expected OOB space %d, got %d", oobSpace, oobN) - } - oob = oob[:oobN] - fd, err := parseFDFromOOB(oob) - if err != nil { - return 0, err - } - if err := c.CloseRead(); err != nil { - return 0, err - } - return fd, nil -} - -// ConnectToChildWithSocketPath wraps ConnectToChild -func ConnectToChildWithSocketPath(socketPath string, spec port.Spec) (int, error) { - var dialer net.Dialer - conn, err := dialer.Dial("unix", socketPath) - if err != nil { - return 0, err - } - defer conn.Close() - c := conn.(*net.UnixConn) - return ConnectToChild(c, spec) -} - -// ConnectToChildWithRetry retries ConnectToChild every (i*5) milliseconds. -func ConnectToChildWithRetry(socketPath string, spec port.Spec, retries int) (int, error) { - for i := 0; i < retries; i++ { - fd, err := ConnectToChildWithSocketPath(socketPath, spec) - if i == retries-1 && err != nil { - return 0, err - } - if err == nil { - return fd, err - } - // TODO: backoff - time.Sleep(time.Duration(i*5) * time.Millisecond) - } - // NOT REACHED - return 0, errors.New("reached max retry") -} - -func parseFDFromOOB(oob []byte) (int, error) { - scms, err := unix.ParseSocketControlMessage(oob) - if err != nil { - return 0, err - } - if len(scms) != 1 { - return 0, errors.Errorf("unexpected scms: %v", scms) - } - scm := scms[0] - fds, err := unix.ParseUnixRights(&scm) - if err != nil { - return 0, err - } - if len(fds) != 1 { - return 0, errors.Errorf("unexpected fds: %v", fds) - } - return fds[0], nil -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque/opaque.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque/opaque.go deleted file mode 100644 index 391b3d34046..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque/opaque.go +++ /dev/null @@ -1,6 +0,0 @@ -package opaque - -const ( - SocketPath = "builtin.socketpath" - ChildReadyPipePath = "builtin.readypipepath" -) diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go deleted file mode 100644 index c6eecc82619..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/parent.go +++ /dev/null @@ -1,212 +0,0 @@ -package parent - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/pkg/errors" - - "github.com/rootless-containers/rootlesskit/pkg/api" - "github.com/rootless-containers/rootlesskit/pkg/port" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp" - 
"github.com/rootless-containers/rootlesskit/pkg/port/portutil" -) - -// NewDriver for builtin driver. -func NewDriver(logWriter io.Writer, stateDir string) (port.ParentDriver, error) { - // TODO: consider using socketpair FD instead of socket file - socketPath := filepath.Join(stateDir, ".bp.sock") - childReadyPipePath := filepath.Join(stateDir, ".bp-ready.pipe") - // remove the path just in case the previous rootlesskit instance crashed - if err := os.RemoveAll(childReadyPipePath); err != nil { - return nil, errors.Wrapf(err, "cannot remove %s", childReadyPipePath) - } - if err := syscall.Mkfifo(childReadyPipePath, 0600); err != nil { - return nil, errors.Wrapf(err, "cannot mkfifo %s", childReadyPipePath) - } - d := driver{ - logWriter: logWriter, - socketPath: socketPath, - childReadyPipePath: childReadyPipePath, - ports: make(map[int]*port.Status, 0), - stoppers: make(map[int]func(context.Context) error, 0), - nextID: 1, - } - return &d, nil -} - -type driver struct { - logWriter io.Writer - socketPath string - childReadyPipePath string - mu sync.Mutex - ports map[int]*port.Status - stoppers map[int]func(context.Context) error - nextID int -} - -func (d *driver) Info(ctx context.Context) (*api.PortDriverInfo, error) { - info := &api.PortDriverInfo{ - Driver: "builtin", - Protos: []string{"tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"}, - DisallowLoopbackChildIP: false, - } - return info, nil -} - -func (d *driver) OpaqueForChild() map[string]string { - return map[string]string{ - opaque.SocketPath: d.socketPath, - opaque.ChildReadyPipePath: d.childReadyPipePath, - } -} - -func (d *driver) RunParentDriver(initComplete chan struct{}, quit <-chan struct{}, _ *port.ChildContext) error { - childReadyPipeR, err := os.OpenFile(d.childReadyPipePath, os.O_RDONLY, os.ModeNamedPipe) - if err != nil { - return err - } - if _, err = ioutil.ReadAll(childReadyPipeR); err != nil { - return err - } - childReadyPipeR.Close() - var dialer net.Dialer - conn, err := dialer.Dial("unix", d.socketPath) - if err != nil { - return err - } - err = msg.Initiate(conn.(*net.UnixConn)) - conn.Close() - if err != nil { - return err - } - initComplete <- struct{}{} - <-quit - return nil -} - -func isEPERM(err error) bool { - k := "permission denied" - // As of Go 1.14, errors.Is(err, syscall.EPERM) does not seem to work for - // "listen tcp 0.0.0.0:80: bind: permission denied" error from net.ListenTCP(). - return errors.Is(err, syscall.EPERM) || strings.Contains(err.Error(), k) -} - -// annotateEPERM annotates origErr for human-readability -func annotateEPERM(origErr error, spec port.Spec) error { - // Read "net.ipv4.ip_unprivileged_port_start" value (typically 1024) - // TODO: what for IPv6? - // NOTE: sync.Once should not be used here - b, e := ioutil.ReadFile("/proc/sys/net/ipv4/ip_unprivileged_port_start") - if e != nil { - return origErr - } - start, e := strconv.Atoi(strings.TrimSpace(string(b))) - if e != nil { - return origErr - } - if spec.ParentPort >= start { - // origErr is unrelated to ip_unprivileged_port_start - return origErr - } - text := fmt.Sprintf("cannot expose privileged port %d, you can add 'net.ipv4.ip_unprivileged_port_start=%d' to /etc/sysctl.conf (currently %d)", spec.ParentPort, spec.ParentPort, start) - if filepath.Base(os.Args[0]) == "rootlesskit" { - // NOTE: The following sentence is appended only if Args[0] == "rootlesskit", because it does not apply to Podman (as of Podman v1.9). 
- // Podman launches the parent driver in the child user namespace (but in the parent network namespace), which disables the file capability. - text += ", or set CAP_NET_BIND_SERVICE on rootlesskit binary" - } - text += fmt.Sprintf(", or choose a larger port number (>= %d)", start) - return errors.Wrap(origErr, text) -} - -func (d *driver) AddPort(ctx context.Context, spec port.Spec) (*port.Status, error) { - d.mu.Lock() - err := portutil.ValidatePortSpec(spec, d.ports) - d.mu.Unlock() - if err != nil { - return nil, err - } - // NOTE: routineStopCh is close-only channel. Do not send any data. - // See commit 4803f18fae1e39d200d98f09e445a97ccd6f5526 `Revert "port/builtin: RemovePort() block until conn is closed"` - routineStopCh := make(chan struct{}) - routineStoppedCh := make(chan error) - routineStop := func(ctx context.Context) error { - close(routineStopCh) - select { - case stoppedResult, stoppedResultOk := <-routineStoppedCh: - if stoppedResultOk { - return stoppedResult - } - return errors.New("routineStoppedCh was closed without sending data?") - case <-ctx.Done(): - return errors.Wrap(err, "timed out while waiting for routineStoppedCh after closing routineStopCh") - } - } - switch spec.Proto { - case "tcp", "tcp4", "tcp6": - err = tcp.Run(d.socketPath, spec, routineStopCh, routineStoppedCh, d.logWriter) - case "udp", "udp4", "udp6": - err = udp.Run(d.socketPath, spec, routineStopCh, routineStoppedCh, d.logWriter) - default: - // NOTREACHED - return nil, errors.New("spec was not validated?") - } - if err != nil { - if isEPERM(err) { - err = annotateEPERM(err, spec) - } - return nil, err - } - d.mu.Lock() - id := d.nextID - st := port.Status{ - ID: id, - Spec: spec, - } - d.ports[id] = &st - d.stoppers[id] = routineStop - d.nextID++ - d.mu.Unlock() - return &st, nil -} - -func (d *driver) ListPorts(ctx context.Context) ([]port.Status, error) { - var ports []port.Status - d.mu.Lock() - for _, p := range d.ports { - ports = append(ports, *p) - } - d.mu.Unlock() - return ports, nil -} - -func (d *driver) RemovePort(ctx context.Context, id int) error { - d.mu.Lock() - defer d.mu.Unlock() - stop, ok := d.stoppers[id] - if !ok { - return errors.Errorf("unknown id: %d", id) - } - if _, ok := ctx.Deadline(); !ok { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, 5*time.Second) - defer cancel() - } - err := stop(ctx) - delete(d.stoppers, id) - delete(d.ports, id) - return err -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go deleted file mode 100644 index 32c714468d7..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go +++ /dev/null @@ -1,108 +0,0 @@ -package tcp - -import ( - "fmt" - "io" - "net" - "os" - "strconv" - "sync" - - "github.com/rootless-containers/rootlesskit/pkg/port" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg" -) - -func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, stoppedCh chan error, logWriter io.Writer) error { - ln, err := net.Listen(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort))) - if err != nil { - fmt.Fprintf(logWriter, "listen: %v\n", err) - return err - } - newConns := make(chan net.Conn) - go func() { - for { - c, err := ln.Accept() - if err != nil { - fmt.Fprintf(logWriter, "accept: %v\n", err) - close(newConns) - return - } - newConns <- c - } - }() - go func() { - defer func() { - 
stoppedCh <- ln.Close() - close(stoppedCh) - }() - for { - select { - case c, ok := <-newConns: - if !ok { - return - } - go func() { - if err := copyConnToChild(c, socketPath, spec, stopCh); err != nil { - fmt.Fprintf(logWriter, "copyConnToChild: %v\n", err) - return - } - }() - case <-stopCh: - return - } - } - }() - // no wait - return nil -} - -func copyConnToChild(c net.Conn, socketPath string, spec port.Spec, stopCh <-chan struct{}) error { - defer c.Close() - // get fd from the child as an SCM_RIGHTS cmsg - fd, err := msg.ConnectToChildWithRetry(socketPath, spec, 10) - if err != nil { - return err - } - f := os.NewFile(uintptr(fd), "") - defer f.Close() - fc, err := net.FileConn(f) - if err != nil { - return err - } - defer fc.Close() - bicopy(c, fc, stopCh) - return nil -} - -// bicopy is based on libnetwork/cmd/proxy/tcp_proxy.go . -// NOTE: sendfile(2) cannot be used for sockets -func bicopy(x, y net.Conn, quit <-chan struct{}) { - var wg sync.WaitGroup - var broker = func(to, from net.Conn) { - io.Copy(to, from) - if fromTCP, ok := from.(*net.TCPConn); ok { - fromTCP.CloseRead() - } - if toTCP, ok := to.(*net.TCPConn); ok { - toTCP.CloseWrite() - } - wg.Done() - } - - wg.Add(2) - go broker(x, y) - go broker(y, x) - finish := make(chan struct{}) - go func() { - wg.Wait() - close(finish) - }() - - select { - case <-quit: - case <-finish: - } - x.Close() - y.Close() - <-finish -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go deleted file mode 100644 index 67062117ac2..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go +++ /dev/null @@ -1,62 +0,0 @@ -package udp - -import ( - "io" - "net" - "os" - "strconv" - - "github.com/pkg/errors" - - "github.com/rootless-containers/rootlesskit/pkg/port" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg" - "github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy" -) - -func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, stoppedCh chan error, logWriter io.Writer) error { - addr, err := net.ResolveUDPAddr(spec.Proto, net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort))) - if err != nil { - return err - } - c, err := net.ListenUDP(spec.Proto, addr) - if err != nil { - return err - } - udpp := &udpproxy.UDPProxy{ - LogWriter: logWriter, - Listener: c, - BackendDial: func() (*net.UDPConn, error) { - // get fd from the child as an SCM_RIGHTS cmsg - fd, err := msg.ConnectToChildWithRetry(socketPath, spec, 10) - if err != nil { - return nil, err - } - f := os.NewFile(uintptr(fd), "") - defer f.Close() - fc, err := net.FileConn(f) - if err != nil { - return nil, err - } - uc, ok := fc.(*net.UDPConn) - if !ok { - return nil, errors.Errorf("file conn doesn't implement *net.UDPConn: %+v", fc) - } - return uc, nil - }, - } - go udpp.Run() - go func() { - for { - select { - case <-stopCh: - // udpp.Close closes ln as well - udpp.Close() - stoppedCh <- nil - close(stoppedCh) - return - } - } - }() - // no wait - return nil -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy/udp_proxy.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy/udp_proxy.go deleted file mode 100644 index af7b7d5d979..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy/udp_proxy.go +++ 
/dev/null @@ -1,150 +0,0 @@ -// Package udpproxy is from https://raw.githubusercontent.com/docker/libnetwork/fec6476dfa21380bf8ee4d74048515d968c1ee63/cmd/proxy/udp_proxy.go -package udpproxy - -import ( - "encoding/binary" - "fmt" - "io" - "net" - "strings" - "sync" - "syscall" - "time" -) - -const ( - // UDPConnTrackTimeout is the timeout used for UDP connection tracking - UDPConnTrackTimeout = 90 * time.Second - // UDPBufSize is the buffer size for the UDP proxy - UDPBufSize = 65507 -) - -// A net.Addr where the IP is split into two fields so you can use it as a key -// in a map: -type connTrackKey struct { - IPHigh uint64 - IPLow uint64 - Port int -} - -func newConnTrackKey(addr *net.UDPAddr) *connTrackKey { - if len(addr.IP) == net.IPv4len { - return &connTrackKey{ - IPHigh: 0, - IPLow: uint64(binary.BigEndian.Uint32(addr.IP)), - Port: addr.Port, - } - } - return &connTrackKey{ - IPHigh: binary.BigEndian.Uint64(addr.IP[:8]), - IPLow: binary.BigEndian.Uint64(addr.IP[8:]), - Port: addr.Port, - } -} - -type connTrackMap map[connTrackKey]*net.UDPConn - -// UDPProxy is proxy for which handles UDP datagrams. -// From libnetwork udp_proxy.go . -type UDPProxy struct { - LogWriter io.Writer - Listener *net.UDPConn - BackendDial func() (*net.UDPConn, error) - connTrackTable connTrackMap - connTrackLock sync.Mutex -} - -func (proxy *UDPProxy) replyLoop(proxyConn *net.UDPConn, clientAddr *net.UDPAddr, clientKey *connTrackKey) { - defer func() { - proxy.connTrackLock.Lock() - delete(proxy.connTrackTable, *clientKey) - proxy.connTrackLock.Unlock() - proxyConn.Close() - }() - - readBuf := make([]byte, UDPBufSize) - for { - proxyConn.SetReadDeadline(time.Now().Add(UDPConnTrackTimeout)) - again: - read, err := proxyConn.Read(readBuf) - if err != nil { - if err, ok := err.(*net.OpError); ok && err.Err == syscall.ECONNREFUSED { - // This will happen if the last write failed - // (e.g: nothing is actually listening on the - // proxied port on the container), ignore it - // and continue until UDPConnTrackTimeout - // expires: - goto again - } - return - } - for i := 0; i != read; { - written, err := proxy.Listener.WriteToUDP(readBuf[i:read], clientAddr) - if err != nil { - return - } - i += written - } - } -} - -// Run starts forwarding the traffic using UDP. -func (proxy *UDPProxy) Run() { - proxy.connTrackTable = make(connTrackMap) - readBuf := make([]byte, UDPBufSize) - for { - read, from, err := proxy.Listener.ReadFromUDP(readBuf) - if err != nil { - // NOTE: Apparently ReadFrom doesn't return - // ECONNREFUSED like Read do (see comment in - // UDPProxy.replyLoop) - if !isClosedError(err) { - fmt.Fprintf(proxy.LogWriter, "Stopping proxy on udp: %v\n", err) - } - break - } - - fromKey := newConnTrackKey(from) - proxy.connTrackLock.Lock() - proxyConn, hit := proxy.connTrackTable[*fromKey] - if !hit { - proxyConn, err = proxy.BackendDial() - if err != nil { - fmt.Fprintf(proxy.LogWriter, "Can't proxy a datagram to udp: %v\n", err) - proxy.connTrackLock.Unlock() - continue - } - proxy.connTrackTable[*fromKey] = proxyConn - go proxy.replyLoop(proxyConn, from, fromKey) - } - proxy.connTrackLock.Unlock() - for i := 0; i != read; { - written, err := proxyConn.Write(readBuf[i:read]) - if err != nil { - fmt.Fprintf(proxy.LogWriter, "Can't proxy a datagram to udp: %v\n", err) - break - } - i += written - } - } -} - -// Close stops forwarding the traffic. 
-func (proxy *UDPProxy) Close() { - proxy.Listener.Close() - proxy.connTrackLock.Lock() - defer proxy.connTrackLock.Unlock() - for _, conn := range proxy.connTrackTable { - conn.Close() - } -} - -func isClosedError(err error) bool { - /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing. - * See: - * http://golang.org/src/pkg/net/net.go - * https://code.google.com/p/go/issues/detail?id=4337 - * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ - */ - return strings.HasSuffix(err.Error(), "use of closed network connection") -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go deleted file mode 100644 index c95bfc7c74a..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/port.go +++ /dev/null @@ -1,61 +0,0 @@ -package port - -import ( - "context" - "net" - - "github.com/rootless-containers/rootlesskit/pkg/api" -) - -type Spec struct { - // Proto is one of ["tcp", "tcp4", "tcp6", "udp", "udp4", "udp6"]. - // "tcp" may cause listening on both IPv4 and IPv6. (Corresponds to Go's net.Listen .) - Proto string `json:"proto,omitempty"` - ParentIP string `json:"parentIP,omitempty"` // IPv4 or IPv6 address. can be empty (0.0.0.0). - ParentPort int `json:"parentPort,omitempty"` - ChildPort int `json:"childPort,omitempty"` - // ChildIP is an IPv4 or IPv6 address. - // Default values: - // - builtin driver: 127.0.0.1 - // - slirp4netns driver: slirp4netns's child IP, e.g., 10.0.2.100 - ChildIP string `json:"childIP,omitempty"` -} - -type Status struct { - ID int `json:"id"` - Spec Spec `json:"spec"` -} - -// Manager MUST be thread-safe. -type Manager interface { - AddPort(ctx context.Context, spec Spec) (*Status, error) - ListPorts(ctx context.Context) ([]Status, error) - RemovePort(ctx context.Context, id int) error -} - -// ChildContext is used for RunParentDriver -type ChildContext struct { - // PID of the child, can be used for ns-entering to the child namespaces. - PID int - // IP of the tap device - IP net.IP -} - -// ParentDriver is a driver for the parent process. -type ParentDriver interface { - Manager - Info(ctx context.Context) (*api.PortDriverInfo, error) - // OpaqueForChild typically consists of socket path - // for controlling child from parent - OpaqueForChild() map[string]string - // RunParentDriver signals initComplete when ParentDriver is ready to - // serve as Manager. - // RunParentDriver blocks until quit is signaled. - // - // ChildContext is optional. - RunParentDriver(initComplete chan struct{}, quit <-chan struct{}, cctx *ChildContext) error -} - -type ChildDriver interface { - RunChildDriver(opaque map[string]string, quit <-chan struct{}) error -} diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go deleted file mode 100644 index 93793264298..00000000000 --- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go +++ /dev/null @@ -1,163 +0,0 @@ -package portutil - -import ( - "net" - "strconv" - "strings" - "text/scanner" - - "github.com/pkg/errors" - - "github.com/rootless-containers/rootlesskit/pkg/port" -) - -// ParsePortSpec parses a Docker-like representation of PortSpec, but with -// support for both "parent IP" and "child IP" (optional); -// e.g.
"127.0.0.1:8080:80/tcp", or "127.0.0.1:8080:10.0.2.100:80/tcp" -// -// Format is as follows: -// -// <parent IP>:<parent port>[:<child IP>]:<child port>/<proto> -// -// Note that (child IP being optional) the format can either contain 5 or 4 -// components. When using IPv6 IP addresses, addresses must use square brackets -// to prevent the colons being mistaken for delimiters. For example: -// -// [::1]:8080:[::2]:80/udp -func ParsePortSpec(portSpec string) (*port.Spec, error) { - const ( - parentIP = iota - parentPort = iota - childIP = iota - childPort = iota - proto = iota - ) - - var ( - s scanner.Scanner - err error - parts = make([]string, 5) - index = parentIP - delimiter = ':' - ) - - // First get the "proto" and "parent-port" at the end. These parts are - // required, whereas "ParentIP" is optional. Removing them first makes - // it easier to parse the remaining parts, as otherwise the third part - // could be _either_ an IP-address _or_ a Port. - - // Get the proto - protoPos := strings.LastIndex(portSpec, "/") - if protoPos < 0 { - return nil, errors.Errorf("missing proto in PortSpec string: %q", portSpec) - } - parts[proto] = portSpec[protoPos+1:] - err = validateProto(parts[proto]) - if err != nil { - return nil, errors.Wrapf(err, "invalid PortSpec string: %q", portSpec) - } - - // Get the parent port - portPos := strings.LastIndex(portSpec, ":") - if portPos < 0 { - return nil, errors.Errorf("unexpected PortSpec string: %q", portSpec) - } - parts[childPort] = portSpec[portPos+1 : protoPos] - - // Scan the remainder "<parent IP>:<parent port>[:<child IP>]" - s.Init(strings.NewReader(portSpec[:portPos])) - - for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() { - if index > childPort { - return nil, errors.Errorf("unexpected PortSpec string: %q", portSpec) - } - - switch tok { - case '[': - // Start of IPv6 IP-address; value ends at closing bracket (]) - delimiter = ']' - continue - case delimiter: - if delimiter == ']' { - // End of IPv6 IP-address - delimiter = ':' - // Skip the next token, which should be a colon delimiter (:) - tok = s.Scan() - } - index++ - continue - default: - parts[index] += s.TokenText() - } - } - - if parts[parentIP] != "" && net.ParseIP(parts[parentIP]) == nil { - return nil, errors.Errorf("unexpected ParentIP in PortSpec string: %q", portSpec) - } - if parts[childIP] != "" && net.ParseIP(parts[childIP]) == nil { - return nil, errors.Errorf("unexpected ParentIP in PortSpec string: %q", portSpec) - } - - ps := &port.Spec{ - Proto: parts[proto], - ParentIP: parts[parentIP], - ChildIP: parts[childIP], - } - - ps.ParentPort, err = strconv.Atoi(parts[parentPort]) - if err != nil { - return nil, errors.Wrapf(err, "unexpected ChildPort in PortSpec string: %q", portSpec) - } - - ps.ChildPort, err = strconv.Atoi(parts[childPort]) - if err != nil { - return nil, errors.Wrapf(err, "unexpected ParentPort in PortSpec string: %q", portSpec) - } - - return ps, nil -} - -// ValidatePortSpec validates *port.Spec. -// existingPorts can be optionally passed for detecting conflicts.
-func ValidatePortSpec(spec port.Spec, existingPorts map[int]*port.Status) error { - if err := validateProto(spec.Proto); err != nil { - return err - } - if spec.ParentIP != "" { - if net.ParseIP(spec.ParentIP) == nil { - return errors.Errorf("invalid ParentIP: %q", spec.ParentIP) - } - } - if spec.ChildIP != "" { - if net.ParseIP(spec.ChildIP) == nil { - return errors.Errorf("invalid ChildIP: %q", spec.ChildIP) - } - } - if spec.ParentPort <= 0 || spec.ParentPort > 65535 { - return errors.Errorf("invalid ParentPort: %q", spec.ParentPort) - } - if spec.ChildPort <= 0 || spec.ChildPort > 65535 { - return errors.Errorf("invalid ChildPort: %q", spec.ChildPort) - } - for id, p := range existingPorts { - sp := p.Spec - sameProto := sp.Proto == spec.Proto - sameParent := sp.ParentIP == spec.ParentIP && sp.ParentPort == spec.ParentPort - if sameProto && sameParent { - return errors.Errorf("conflict with ID %d", id) - } - } - return nil -} - -func validateProto(proto string) error { - switch proto { - case - "tcp", "tcp4", "tcp6", - "udp", "udp4", "udp6", - "sctp", "sctp4", "sctp6": - return nil - default: - return errors.Errorf("unknown proto: %q", proto) - } -} diff --git a/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml new file mode 100644 index 00000000000..7df8aa19838 --- /dev/null +++ b/vendor/github.com/seccomp/libseccomp-golang/.golangci.yml @@ -0,0 +1,4 @@ +# For documentation, see https://golangci-lint.run/usage/configuration/ +linters: + enable: + - gofumpt diff --git a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml deleted file mode 100644 index 5240d462280..00000000000 --- a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml +++ /dev/null @@ -1,57 +0,0 @@ -# Travis CI configuration for libseccomp-golang - -# https://docs.travis-ci.com/user/reference/bionic -# https://wiki.ubuntu.com/Releases - -dist: bionic -sudo: false - -notifications: - email: - on_success: always - on_failure: always - -arch: - - amd64 - -os: - - linux - -language: go - -jobs: - include: - - name: "last libseccomp 2.5.0" - env: - - SECCOMP_VER=2.5.0 - - SECCOMP_SHA256SUM=1ffa7038d2720ad191919816db3479295a4bcca1ec14e02f672539f4983014f3 - - name: "compat libseccomp 2.4.4" - env: - - SECCOMP_VER=2.4.4 - - SECCOMP_SHA256SUM=4e79738d1ef3c9b7ca9769f1f8b8d84fc17143c2c1c432e53b9c64787e0ff3eb - - name: "compat libseccomp 2.2.1" - env: - - SECCOMP_VER=2.2.1 - - SECCOMP_SHA256SUM=0ba1789f54786c644af54cdffc9fd0dd0a8bb2b2ee153933f658855d2851a740 - -addons: - apt: - packages: - - build-essential - - astyle - - golint - - gperf - -install: - - go get -u golang.org/x/lint/golint - -# run all of the tests independently, fail if any of the tests error -script: - - wget https://github.com/seccomp/libseccomp/releases/download/v$SECCOMP_VER/libseccomp-$SECCOMP_VER.tar.gz - - echo $SECCOMP_SHA256SUM libseccomp-$SECCOMP_VER.tar.gz | sha256sum -c - - tar xf libseccomp-$SECCOMP_VER.tar.gz - - pushd libseccomp-$SECCOMP_VER && ./configure --prefix=/opt/libseccomp-$SECCOMP_VER && make && sudo make install && popd - - make check-syntax - - make lint - - PKG_CONFIG_PATH=/opt/libseccomp-$SECCOMP_VER/lib/pkgconfig LD_LIBRARY_PATH=/opt/libseccomp-$SECCOMP_VER/lib make vet - - PKG_CONFIG_PATH=/opt/libseccomp-$SECCOMP_VER/lib/pkgconfig LD_LIBRARY_PATH=/opt/libseccomp-$SECCOMP_VER/lib make test diff --git a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md 
b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md index d6862cbd5f9..c2fc80d5af6 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md +++ b/vendor/github.com/seccomp/libseccomp-golang/CONTRIBUTING.md @@ -1,31 +1,23 @@ -How to Submit Patches to the libseccomp Project +How to Submit Patches to the libseccomp-golang Project =============================================================================== https://github.com/seccomp/libseccomp-golang This document is intended to act as a guide to help you contribute to the -libseccomp project. It is not perfect, and there will always be exceptions -to the rules described here, but by following the instructions below you -should have a much easier time getting your work merged with the upstream +libseccomp-golang project. It is not perfect, and there will always be +exceptions to the rules described here, but by following the instructions below +you should have a much easier time getting your work merged with the upstream project. ## Test Your Code Using Existing Tests -There are two possible tests you can run to verify your code. The first -test is used to check the formatting and coding style of your changes, you -can run the test with the following command: - - # make check-syntax - -... if there are any problems with your changes a diff/patch will be shown -which indicates the problems and how to fix them. - -The second possible test is used to ensure the sanity of your code changes -and to test these changes against the included tests. You can run the test -with the following command: +A number of tests and lint related recipes are provided in the Makefile, if +you want to run the standard regression tests, you can execute the following: # make check -... if there are any faults or errors they will be displayed. +In order to use it, the 'golangci-lint' tool is needed, which can be found at: + +* https://github.com/golangci/golangci-lint ## Add New Tests for New Functionality diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile index 38cfa852cd6..530f5b4adbc 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/Makefile +++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile @@ -4,7 +4,7 @@ all: check-build -check: vet test +check: lint test check-build: go build @@ -16,7 +16,7 @@ fix-syntax: gofmt -w . vet: - go vet -v + go vet -v ./... # Previous bugs have made the tests freeze until the timeout. Golang default # timeout for tests is 10 minutes, which is too long, considering current tests @@ -28,5 +28,4 @@ test: go test -v -timeout $(TEST_TIMEOUT) lint: - @$(if $(shell which golint),true,$(error "install golint and include it in your PATH")) - golint -set_exit_status + golangci-lint run . 
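The lint switch above is visible to contributors: golangci-lint with the gofumpt linter (the only linter enabled by the new .golangci.yml) is stricter than plain gofmt. As a hypothetical illustration of one such rule (the snippet below is not part of this change), gofumpt requires the 0o prefix for octal literals on modules targeting Go 1.13 and later:

```go
package main

import "fmt"

// gofmt accepts the older spelling "const mode = 0755"; gofumpt flags it and
// rewrites it to the 0o form used below.
const mode = 0o755

func main() {
	fmt.Printf("%04o\n", mode) // prints 0755
}
```
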
diff --git a/vendor/github.com/seccomp/libseccomp-golang/README.md b/vendor/github.com/seccomp/libseccomp-golang/README.md index 806a5ddf29b..6430f1c9e25 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/README.md +++ b/vendor/github.com/seccomp/libseccomp-golang/README.md @@ -2,7 +2,9 @@ =============================================================================== https://github.com/seccomp/libseccomp-golang -[![Build Status](https://img.shields.io/travis/seccomp/libseccomp-golang/main.svg)](https://travis-ci.org/seccomp/libseccomp-golang) +[![Go Reference](https://pkg.go.dev/badge/github.com/seccomp/libseccomp-golang.svg)](https://pkg.go.dev/github.com/seccomp/libseccomp-golang) +[![validate](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/validate.yml) +[![test](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml/badge.svg)](https://github.com/seccomp/libseccomp-golang/actions/workflows/test.yml) The libseccomp library provides an easy to use, platform independent, interface to the Linux Kernel's syscall filtering mechanism. The libseccomp API is @@ -26,26 +28,14 @@ list. * https://groups.google.com/d/forum/libseccomp -Documentation is also available at: +Documentation for this package is also available at: -* https://godoc.org/github.com/seccomp/libseccomp-golang +* https://pkg.go.dev/github.com/seccomp/libseccomp-golang ## Installing the package -The libseccomp-golang bindings require at least Go v1.2.1 and GCC v4.8.4; -earlier versions may yield unpredictable results. If you meet these -requirements you can install this package using the command below: - # go get github.com/seccomp/libseccomp-golang -## Testing the Library - -A number of tests and lint related recipes are provided in the Makefile, if -you want to run the standard regression tests, you can excute the following: - - # make check - -In order to execute the 'make lint' recipe the 'golint' tool is needed, it -can be found at: +## Contributing -* https://github.com/golang/lint +See [CONTRIBUTING.md](CONTRIBUTING.md). diff --git a/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md new file mode 100644 index 00000000000..c448faa8e80 --- /dev/null +++ b/vendor/github.com/seccomp/libseccomp-golang/SECURITY.md @@ -0,0 +1,47 @@ +The libseccomp-golang Security Vulnerability Handling Process +=============================================================================== +https://github.com/seccomp/libseccomp-golang + +This document attempts to describe the processes through which +sensitive security relevant bugs can be responsibly disclosed to the +libseccomp-golang project and how the project maintainers should handle these +reports. Just like the other libseccomp-golang process documents, this +document should be treated as a guiding document and not a hard, unyielding set +of regulations; the bug reporters and project maintainers are encouraged to +work together to address the issues as best they can, in a manner which works +best for all parties involved. + +### Reporting Problems + +Problems with the libseccomp-golang library that are not suitable for immediate +public disclosure should be emailed to the current libseccomp-golang +maintainers, the list is below.
We typically request at most a 90 day time +period to address the issue before it is made public, but we will make every +effort to address the issue as quickly as possible and shorten the disclosure +window. + +* Paul Moore, paul@paul-moore.com +* Tom Hromatka, tom.hromatka@oracle.com + +### Resolving Sensitive Security Issues + +Upon disclosure of a bug, the maintainers should work together to investigate +the problem and decide on a solution. In order to prevent an early disclosure +of the problem, those working on the solution should do so privately and +outside of the traditional libseccomp-golang development practices. One +possible solution to this is to leverage the GitHub "Security" functionality to +create a private development fork that can be shared among the maintainers, and +optionally the reporter. A placeholder GitHub issue may be created, but +details should remain extremely limited until such time as the problem has been +fixed and responsibly disclosed. If a CVE, or other tag, has been assigned to +the problem, the GitHub issue title should include the vulnerability tag once +the problem has been disclosed. + +### Public Disclosure + +Whenever possible, responsible reporting and patching practices should be +followed, including notification to the linux-distros and oss-security mailing +lists. + +* https://oss-security.openwall.org/wiki/mailing-lists/distros +* https://oss-security.openwall.org/wiki/mailing-lists/oss-security diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go index e9b92e2219a..8dad12fdbb9 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go +++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go @@ -1,5 +1,3 @@ -// +build linux - // Public API specification for libseccomp Go bindings // Contains public API for the bindings @@ -18,48 +16,36 @@ import ( "unsafe" ) -// C wrapping code - -// To compile libseccomp-golang against a specific version of libseccomp: -// cd ../libseccomp && mkdir -p prefix -// ./configure --prefix=$PWD/prefix && make && make install -// cd ../libseccomp-golang -// PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make -// LD_PRELOAD=$PWD/../libseccomp/prefix/lib/libseccomp.so.2.5.0 PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make test - -// #cgo pkg-config: libseccomp // #include <stdlib.h> // #include <seccomp.h> import "C" // Exported types -// VersionError denotes that the system libseccomp version is incompatible -// with this package. +// VersionError represents an error when either the system libseccomp version +// or the kernel version is too old to perform the operation requested. type VersionError struct { - message string - minimum string + op string // operation that failed or would fail + major, minor, micro uint // minimally required libseccomp version + curAPI, minAPI uint // current and minimally required API versions } func init() { // This forces the cgo libseccomp to initialize its internal API support state, // which is necessary on older versions of libseccomp in order to work // correctly.
- GetAPI() + _, _ = getAPI() } func (e VersionError) Error() string { - messageStr := "" - if e.message != "" { - messageStr = e.message + ": " + if e.minAPI != 0 { + return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d and API level >= %d "+ + "(current version: %d.%d.%d, API level: %d)", + e.op, e.major, e.minor, e.micro, e.minAPI, + verMajor, verMinor, verMicro, e.curAPI) } - minimumStr := "" - if e.minimum != "" { - minimumStr = e.minimum - } else { - minimumStr = "2.2.0" - } - return fmt.Sprintf("Libseccomp version too low: %sminimum supported is %s: detected %d.%d.%d", messageStr, minimumStr, verMajor, verMinor, verMicro) + return fmt.Sprintf("%s requires libseccomp >= %d.%d.%d (current version: %d.%d.%d)", + e.op, e.major, e.minor, e.micro, verMajor, verMinor, verMicro) } // ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a @@ -148,44 +134,46 @@ const ( // variables are invalid ArchInvalid ScmpArch = iota // ArchNative is the native architecture of the kernel - ArchNative ScmpArch = iota + ArchNative // ArchX86 represents 32-bit x86 syscalls - ArchX86 ScmpArch = iota + ArchX86 // ArchAMD64 represents 64-bit x86-64 syscalls - ArchAMD64 ScmpArch = iota + ArchAMD64 // ArchX32 represents 64-bit x86-64 syscalls (32-bit pointers) - ArchX32 ScmpArch = iota + ArchX32 // ArchARM represents 32-bit ARM syscalls - ArchARM ScmpArch = iota + ArchARM // ArchARM64 represents 64-bit ARM syscalls - ArchARM64 ScmpArch = iota + ArchARM64 // ArchMIPS represents 32-bit MIPS syscalls - ArchMIPS ScmpArch = iota + ArchMIPS // ArchMIPS64 represents 64-bit MIPS syscalls - ArchMIPS64 ScmpArch = iota + ArchMIPS64 // ArchMIPS64N32 represents 64-bit MIPS syscalls (32-bit pointers) - ArchMIPS64N32 ScmpArch = iota + ArchMIPS64N32 // ArchMIPSEL represents 32-bit MIPS syscalls (little endian) - ArchMIPSEL ScmpArch = iota + ArchMIPSEL // ArchMIPSEL64 represents 64-bit MIPS syscalls (little endian) - ArchMIPSEL64 ScmpArch = iota + ArchMIPSEL64 // ArchMIPSEL64N32 represents 64-bit MIPS syscalls (little endian, // 32-bit pointers) - ArchMIPSEL64N32 ScmpArch = iota + ArchMIPSEL64N32 // ArchPPC represents 32-bit POWERPC syscalls - ArchPPC ScmpArch = iota + ArchPPC // ArchPPC64 represents 64-bit POWER syscalls (big endian) - ArchPPC64 ScmpArch = iota + ArchPPC64 // ArchPPC64LE represents 64-bit POWER syscalls (little endian) - ArchPPC64LE ScmpArch = iota + ArchPPC64LE // ArchS390 represents 31-bit System z/390 syscalls - ArchS390 ScmpArch = iota + ArchS390 // ArchS390X represents 64-bit System z/390 syscalls - ArchS390X ScmpArch = iota + ArchS390X // ArchPARISC represents 32-bit PA-RISC - ArchPARISC ScmpArch = iota + ArchPARISC // ArchPARISC64 represents 64-bit PA-RISC - ArchPARISC64 ScmpArch = iota + ArchPARISC64 + // ArchRISCV64 represents RISCV64 + ArchRISCV64 ) const ( @@ -194,34 +182,36 @@ const ( // ActInvalid is a placeholder to ensure uninitialized ScmpAction // variables are invalid ActInvalid ScmpAction = iota - // ActKill kills the thread that violated the rule. It is the same as ActKillThread. + // ActKillThread kills the thread that violated the rule. // All other threads from the same thread group will continue to execute. - ActKill ScmpAction = iota + ActKillThread // ActTrap throws SIGSYS - ActTrap ScmpAction = iota + ActTrap // ActNotify triggers a userspace notification. This action is only usable when // libseccomp API level 6 or higher is supported. - ActNotify ScmpAction = iota + ActNotify // ActErrno causes the syscall to return a negative error code. 
This // code can be set with the SetReturnCode method - ActErrno ScmpAction = iota + ActErrno // ActTrace causes the syscall to notify tracing processes with the // given error code. This code can be set with the SetReturnCode method - ActTrace ScmpAction = iota + ActTrace // ActAllow permits the syscall to continue execution - ActAllow ScmpAction = iota + ActAllow // ActLog permits the syscall to continue execution after logging it. // This action is only usable when libseccomp API level 3 or higher is // supported. - ActLog ScmpAction = iota - // ActKillThread kills the thread that violated the rule. It is the same as ActKill. - // All other threads from the same thread group will continue to execute. - ActKillThread ScmpAction = iota + ActLog // ActKillProcess kills the process that violated the rule. // All threads in the thread group are also terminated. // This action is only usable when libseccomp API level 3 or higher is // supported. - ActKillProcess ScmpAction = iota + ActKillProcess + // ActKill kills the thread that violated the rule. + // All other threads from the same thread group will continue to execute. + // + // Deprecated: use ActKillThread + ActKill = ActKillThread ) const ( @@ -234,36 +224,35 @@ const ( CompareInvalid ScmpCompareOp = iota // CompareNotEqual returns true if the argument is not equal to the // given value - CompareNotEqual ScmpCompareOp = iota + CompareNotEqual // CompareLess returns true if the argument is less than the given value - CompareLess ScmpCompareOp = iota + CompareLess // CompareLessOrEqual returns true if the argument is less than or equal // to the given value - CompareLessOrEqual ScmpCompareOp = iota + CompareLessOrEqual // CompareEqual returns true if the argument is equal to the given value - CompareEqual ScmpCompareOp = iota + CompareEqual // CompareGreaterEqual returns true if the argument is greater than or // equal to the given value - CompareGreaterEqual ScmpCompareOp = iota + CompareGreaterEqual // CompareGreater returns true if the argument is greater than the given // value - CompareGreater ScmpCompareOp = iota - // CompareMaskedEqual returns true if the argument is equal to the given - // value, when masked (bitwise &) against the second given value - CompareMaskedEqual ScmpCompareOp = iota + CompareGreater + // CompareMaskedEqual returns true if the masked argument value is + // equal to the masked datum value. Mask is the first argument, and + // datum is the second one. + CompareMaskedEqual ) -var ( - // ErrSyscallDoesNotExist represents an error condition where - // libseccomp is unable to resolve the syscall - ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name") -) +// ErrSyscallDoesNotExist represents an error condition where +// libseccomp is unable to resolve the syscall +var ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name") const ( // Userspace notification response flags // NotifRespFlagContinue tells the kernel to continue executing the system - // call that triggered the notification. Must only be used when the notication + // call that triggered the notification. Must only be used when the notification // response's error is 0. 
NotifRespFlagContinue uint32 = 1 ) @@ -314,6 +303,8 @@ func GetArchFromString(arch string) (ScmpArch, error) { return ArchPARISC, nil case "parisc64": return ArchPARISC64, nil + case "riscv64": + return ArchRISCV64, nil default: return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch) } @@ -358,6 +349,8 @@ func (a ScmpArch) String() string { return "parisc" case ArchPARISC64: return "parisc64" + case ArchRISCV64: + return "riscv64" case ArchNative: return "native" case ArchInvalid: @@ -394,7 +387,7 @@ func (a ScmpCompareOp) String() string { // String returns a string representation of a seccomp match action func (a ScmpAction) String() string { switch a & 0xFFFF { - case ActKill, ActKillThread: + case ActKillThread: return "Action: Kill thread" case ActKillProcess: return "Action: Kill process" @@ -556,8 +549,8 @@ func MakeCondition(arg uint, comparison ScmpCompareOp, values ...uint64) (ScmpCo return condStruct, err } - if comparison == CompareInvalid { - return condStruct, fmt.Errorf("invalid comparison operator") + if err := sanitizeCompareOp(comparison); err != nil { + return condStruct, err } else if arg > 5 { return condStruct, fmt.Errorf("syscalls only have up to 6 arguments (%d given)", arg) } else if len(values) > 2 { @@ -874,10 +867,8 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) { func (f *ScmpFilter) GetLogBit() (bool, error) { log, err := f.getFilterAttr(filterAttrLog) if err != nil { - // Ignore error, if not supported returns apiLevel == 0 - apiLevel, _ := GetAPI() - if apiLevel < 3 { - return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher") + if e := checkAPI("GetLogBit", 3, 2, 4, 0); e != nil { + err = e } return false, err @@ -899,9 +890,8 @@ func (f *ScmpFilter) GetLogBit() (bool, error) { func (f *ScmpFilter) GetSSB() (bool, error) { ssb, err := f.getFilterAttr(filterAttrSSB) if err != nil { - api, apiErr := getAPI() - if (apiErr != nil && api == 0) || (apiErr == nil && api < 4) { - return false, fmt.Errorf("getting the SSB flag is only supported in libseccomp 2.5.0 and newer with API level 4 or higher") + if e := checkAPI("GetSSB", 4, 2, 5, 0); e != nil { + err = e } return false, err @@ -914,6 +904,42 @@ func (f *ScmpFilter) GetSSB() (bool, error) { return true, nil } +// GetOptimize returns the current optimization level of the filter, +// or an error if an issue was encountered retrieving the value. +// See SetOptimize for more details. +func (f *ScmpFilter) GetOptimize() (int, error) { + level, err := f.getFilterAttr(filterAttrOptimize) + if err != nil { + if e := checkAPI("GetOptimize", 4, 2, 5, 0); e != nil { + err = e + } + + return 0, err + } + + return int(level), nil +} + +// GetRawRC returns the current state of RawRC flag, or an error +// if an issue was encountered retrieving the value. +// See SetRawRC for more details. +func (f *ScmpFilter) GetRawRC() (bool, error) { + rawrc, err := f.getFilterAttr(filterAttrRawRC) + if err != nil { + if e := checkAPI("GetRawRC", 4, 2, 5, 0); e != nil { + err = e + } + + return false, err + } + + if rawrc == 0 { + return false, nil + } + + return true, nil +} + // SetBadArchAction sets the default action taken on a syscall for an // architecture not in the filter, or an error if an issue was encountered // setting the value. 
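The attribute getters above (and the setters in the next hunks) now route unsupported-feature failures through checkAPI, which yields a *VersionError carrying the operation name plus the minimum libseccomp and API-level requirements. A minimal caller-side sketch (illustrative only, not part of this change) of how the typed error can be detected:

```go
package main

import (
	"errors"
	"fmt"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Deny-by-default filter; the default action is arbitrary for this sketch.
	filter, err := seccomp.NewFilter(seccomp.ActErrno)
	if err != nil {
		panic(err)
	}
	defer filter.Release()

	// On libseccomp < 2.5.0 (API level < 4), GetOptimize now fails with a
	// *VersionError instead of an ad-hoc error string.
	level, err := filter.GetOptimize()
	var verr *seccomp.VersionError
	switch {
	case errors.As(err, &verr):
		fmt.Println("optimize attribute unsupported:", verr)
	case err != nil:
		panic(err)
	default:
		fmt.Println("current optimization level:", level)
	}
}
```

For a consumer like CRI-O this is the practical payoff of the rewrite: "feature not available on this host" becomes distinguishable from a genuine failure without string matching.
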
@@ -953,10 +979,8 @@ func (f *ScmpFilter) SetLogBit(state bool) error { err := f.setFilterAttr(filterAttrLog, toSet) if err != nil { - // Ignore error, if not supported returns apiLevel == 0 - apiLevel, _ := GetAPI() - if apiLevel < 3 { - return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher") + if e := checkAPI("SetLogBit", 3, 2, 4, 0); e != nil { + err = e } } @@ -976,9 +1000,52 @@ func (f *ScmpFilter) SetSSB(state bool) error { err := f.setFilterAttr(filterAttrSSB, toSet) if err != nil { - api, apiErr := getAPI() - if (apiErr != nil && api == 0) || (apiErr == nil && api < 4) { - return fmt.Errorf("setting the SSB flag is only supported in libseccomp 2.5.0 and newer with API level 4 or higher") + if e := checkAPI("SetSSB", 4, 2, 5, 0); e != nil { + err = e + } + } + + return err +} + +// SetOptimize sets optimization level of the seccomp filter. By default +// libseccomp generates a set of sequential "if" statements for each rule in +// the filter. SetSyscallPriority can be used to prioritize the order for the +// default case. The binary tree optimization sorts by syscall numbers and +// generates consistent O(log n) filter traversal for every rule in the filter. +// The binary tree may be advantageous for large filters. Note that +// SetSyscallPriority is ignored when level == 2. +// +// The different optimization levels are: +// 0: Reserved value, not currently used. +// 1: Rules sorted by priority and complexity (DEFAULT). +// 2: Binary tree sorted by syscall number. +func (f *ScmpFilter) SetOptimize(level int) error { + cLevel := C.uint32_t(level) + + err := f.setFilterAttr(filterAttrOptimize, cLevel) + if err != nil { + if e := checkAPI("SetOptimize", 4, 2, 5, 0); e != nil { + err = e + } + } + + return err +} + +// SetRawRC sets whether libseccomp should pass system error codes back to the +// caller, instead of the default ECANCELED. Defaults to false. +func (f *ScmpFilter) SetRawRC(state bool) error { + var toSet C.uint32_t = 0x0 + + if state { + toSet = 0x1 + } + + err := f.setFilterAttr(filterAttrRawRC, toSet) + if err != nil { + if e := checkAPI("SetRawRC", 4, 2, 5, 0); e != nil { + err = e } } @@ -1029,9 +1096,6 @@ func (f *ScmpFilter) AddRuleExact(call ScmpSyscall, action ScmpAction) error { // AddRuleConditional adds a single rule for a conditional action on a syscall. // Returns an error if an issue was encountered adding the rule. // All conditions must match for the rule to match. -// There is a bug in library versions below v2.2.1 which can, in some cases, -// cause conditions to be lost when more than one are used. Consequently, -// AddRuleConditional is disabled on library versions lower than v2.2.1 func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error { return f.addRuleGeneric(call, action, false, conds) } @@ -1043,9 +1107,6 @@ func (f *ScmpFilter) AddRuleConditional(call ScmpSyscall, action ScmpAction, con // The rule will function exactly as described, but it may not function identically // (or be able to be applied to) all architectures. // Returns an error if an issue was encountered adding the rule. -// There is a bug in library versions below v2.2.1 which can, in some cases, -// cause conditions to be lost when more than one are used.
Consequently, -// AddRuleConditionalExact is disabled on library versions lower than v2.2.1 func (f *ScmpFilter) AddRuleConditionalExact(call ScmpSyscall, action ScmpAction, conds []ScmpCondition) error { return f.addRuleGeneric(call, action, true, conds) } diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go index 8dc7b296f3b..df4dfb7eba8 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go +++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go @@ -1,11 +1,10 @@ -// +build linux - // Internal functions for libseccomp Go bindings // No exported functions package seccomp import ( + "errors" "fmt" "syscall" ) @@ -27,10 +26,10 @@ import ( #include #include -#if SCMP_VER_MAJOR < 2 -#error Minimum supported version of Libseccomp is v2.2.0 -#elif SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 2 -#error Minimum supported version of Libseccomp is v2.2.0 +#if (SCMP_VER_MAJOR < 2) || \ + (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 3) || \ + (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR == 3 && SCMP_VER_MICRO < 1) +#error This package requires libseccomp >= v2.3.1 #endif #define ARCH_BAD ~0 @@ -65,6 +64,10 @@ const uint32_t C_ARCH_BAD = ARCH_BAD; #define SCMP_ARCH_PARISC64 ARCH_BAD #endif +#ifndef SCMP_ARCH_RISCV64 +#define SCMP_ARCH_RISCV64 ARCH_BAD +#endif + const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE; const uint32_t C_ARCH_X86 = SCMP_ARCH_X86; const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64; @@ -84,6 +87,7 @@ const uint32_t C_ARCH_S390 = SCMP_ARCH_S390; const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X; const uint32_t C_ARCH_PARISC = SCMP_ARCH_PARISC; const uint32_t C_ARCH_PARISC64 = SCMP_ARCH_PARISC64; +const uint32_t C_ARCH_RISCV64 = SCMP_ARCH_RISCV64; #ifndef SCMP_ACT_LOG #define SCMP_ACT_LOG 0x7ffc0000U @@ -113,20 +117,25 @@ const uint32_t C_ACT_NOTIFY = SCMP_ACT_NOTIFY; // The libseccomp SCMP_FLTATR_CTL_LOG member of the scmp_filter_attr enum was // added in v2.4.0 -#if (SCMP_VER_MAJOR < 2) || \ - (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4) +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4 #define SCMP_FLTATR_CTL_LOG _SCMP_FLTATR_MIN #endif + +// The following SCMP_FLTATR_* were added in libseccomp v2.5.0. 
#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5 -#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN +#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN +#define SCMP_FLTATR_CTL_OPTIMIZE _SCMP_FLTATR_MIN +#define SCMP_FLTATR_API_SYSRAWRC _SCMP_FLTATR_MIN #endif -const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT; -const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH; -const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP; -const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC; -const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG; -const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB; +const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT; +const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH; +const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP; +const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC; +const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG; +const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB; +const uint32_t C_ATTRIBUTE_OPTIMIZE = (uint32_t)SCMP_FLTATR_CTL_OPTIMIZE; +const uint32_t C_ATTRIBUTE_SYSRAWRC = (uint32_t)SCMP_FLTATR_API_SYSRAWRC; const int C_CMP_NE = (int)SCMP_CMP_NE; const int C_CMP_LT = (int)SCMP_CMP_LT; @@ -173,8 +182,7 @@ unsigned int get_micro_version() #endif // The libseccomp API level functions were added in v2.4.0 -#if (SCMP_VER_MAJOR < 2) || \ - (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4) +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4 const unsigned int seccomp_api_get(void) { // libseccomp-golang requires libseccomp v2.2.0, at a minimum, which @@ -217,8 +225,7 @@ void add_struct_arg_cmp( } // The seccomp notify API functions were added in v2.5.0 -#if (SCMP_VER_MAJOR < 2) || \ - (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5) +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5 struct seccomp_data { int nr; @@ -270,11 +277,13 @@ type scmpFilterAttr uint32 const ( filterAttrActDefault scmpFilterAttr = iota - filterAttrActBadArch scmpFilterAttr = iota - filterAttrNNP scmpFilterAttr = iota - filterAttrTsync scmpFilterAttr = iota - filterAttrLog scmpFilterAttr = iota - filterAttrSSB scmpFilterAttr = iota + filterAttrActBadArch + filterAttrNNP + filterAttrTsync + filterAttrLog + filterAttrSSB + filterAttrOptimize + filterAttrRawRC ) const ( @@ -282,9 +291,9 @@ const ( scmpError C.int = -1 // Comparison boundaries to check for architecture validity archStart ScmpArch = ArchNative - archEnd ScmpArch = ArchPARISC64 + archEnd ScmpArch = ArchRISCV64 // Comparison boundaries to check for action validity - actionStart ScmpAction = ActKill + actionStart ScmpAction = ActKillThread actionEnd ScmpAction = ActKillProcess // Comparison boundaries to check for comparison operator validity compareOpStart ScmpCompareOp = CompareNotEqual @@ -292,8 +301,9 @@ const ( ) var ( - // Error thrown on bad filter context - errBadFilter = fmt.Errorf("filter is invalid or uninitialized") + // errBadFilter is thrown on bad filter context. 
+ errBadFilter = errors.New("filter is invalid or uninitialized") + errDefAction = errors.New("requested action matches default action of filter") // Constants representing library major, minor, and micro versions verMajor = uint(C.get_major_version()) verMinor = uint(C.get_minor_version()) @@ -302,19 +312,28 @@ var ( // Nonexported functions -// Check if library version is greater than or equal to the given one -func checkVersionAbove(major, minor, micro uint) bool { - return (verMajor > major) || +// checkVersion returns an error if the libseccomp version being used +// is less than the one specified by major, minor, and micro arguments. +// Argument op is an arbitrary non-empty operation description, which +// is used as a part of the error message returned. +// +// Most users should use checkAPI instead. +func checkVersion(op string, major, minor, micro uint) error { + if (verMajor > major) || (verMajor == major && verMinor > minor) || - (verMajor == major && verMinor == minor && verMicro >= micro) + (verMajor == major && verMinor == minor && verMicro >= micro) { + return nil + } + return &VersionError{ + op: op, + major: major, + minor: minor, + micro: micro, + } } -// Ensure that the library is supported, i.e. >= 2.2.0. func ensureSupportedVersion() error { - if !checkVersionAbove(2, 2, 0) { - return VersionError{} - } - return nil + return checkVersion("seccomp", 2, 3, 1) } // Get the API level @@ -406,8 +425,10 @@ func (f *ScmpFilter) addRuleWrapper(call ScmpSyscall, action ScmpAction, exact b switch e := errRc(retCode); e { case syscall.EFAULT: return fmt.Errorf("unrecognized syscall %#x", int32(call)) - case syscall.EPERM: - return fmt.Errorf("requested action matches default action of filter") + // libseccomp >= v2.5.0 returns EACCES, older versions return EPERM. + // TODO: remove EPERM once libseccomp < v2.5.0 is not supported. 
+ case syscall.EPERM, syscall.EACCES: + return errDefAction case syscall.EINVAL: return fmt.Errorf("two checks on same syscall argument") default: return e } } @@ -432,14 +453,6 @@ func (f *ScmpFilter) addRuleGeneric(call ScmpSyscall, action ScmpAction, exact b return err } } else { - // We don't support conditional filtering in library version v2.1 - if !checkVersionAbove(2, 2, 1) { - return VersionError{ - message: "conditional filtering is not supported", - minimum: "2.2.1", - } - } - argsArr := C.make_arg_cmp_array(C.uint(len(conds))) if argsArr == nil { return fmt.Errorf("error allocating memory for conditions") @@ -536,6 +549,8 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) { return ArchPARISC, nil case C.C_ARCH_PARISC64: return ArchPARISC64, nil + case C.C_ARCH_RISCV64: + return ArchRISCV64, nil default: return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a)) } @@ -580,6 +595,8 @@ func (a ScmpArch) toNative() C.uint32_t { return C.C_ARCH_PARISC case ArchPARISC64: return C.C_ARCH_PARISC64 + case ArchRISCV64: + return C.C_ARCH_RISCV64 case ArchNative: return C.C_ARCH_NATIVE default: @@ -612,8 +629,6 @@ func (a ScmpCompareOp) toNative() C.int { func actionFromNative(a C.uint32_t) (ScmpAction, error) { aTmp := a & 0xFFFF switch a & 0xFFFF0000 { - case C.C_ACT_KILL: - return ActKill, nil case C.C_ACT_KILL_PROCESS: return ActKillProcess, nil case C.C_ACT_KILL_THREAD: @@ -638,8 +653,6 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) { // Only use with sanitized actions, no error handling func (a ScmpAction) toNative() C.uint32_t { switch a & 0xFFFF { - case ActKill: - return C.C_ACT_KILL case ActKillProcess: return C.C_ACT_KILL_PROCESS case ActKillThread: @@ -676,15 +689,15 @@ func (a scmpFilterAttr) toNative() uint32 { return uint32(C.C_ATTRIBUTE_LOG) case filterAttrSSB: return uint32(C.C_ATTRIBUTE_SSB) + case filterAttrOptimize: + return uint32(C.C_ATTRIBUTE_OPTIMIZE) + case filterAttrRawRC: + return uint32(C.C_ATTRIBUTE_SYSRAWRC) default: return 0x0 } } -func (a ScmpSyscall) toNative() C.uint32_t { - return C.uint32_t(a) -} - func syscallFromNative(a C.int) ScmpSyscall { return ScmpSyscall(a) } @@ -724,9 +737,34 @@ func (scmpResp *ScmpNotifResp) toNative(resp *C.struct_seccomp_notif_resp) { resp.flags = C.__u32(scmpResp.Flags) } +// checkAPI checks that both the API level and the seccomp version are equal to +// or greater than the specified minLevel and major, minor, micro, +// respectively, and returns an error otherwise. Argument op is an arbitrary +// non-empty operation description, used as a part of the error message +// returned. +func checkAPI(op string, minLevel uint, major, minor, micro uint) error { + // Ignore error from getAPI, as it returns level == 0 in case of error.
+ level, _ := getAPI() + if level >= minLevel { + return checkVersion(op, major, minor, micro) + } + return &VersionError{ + op: op, + curAPI: level, + minAPI: minLevel, + major: major, + minor: minor, + micro: micro, + } +} + // Userspace Notification API // Calls to C.seccomp_notify* hidden from seccomp.go +func notifSupported() error { + return checkAPI("seccomp notification", 6, 2, 5, 0) +} + func (f *ScmpFilter) getNotifFd() (ScmpFd, error) { f.lock.Lock() defer f.lock.Unlock() @@ -734,11 +772,8 @@ func (f *ScmpFilter) getNotifFd() (ScmpFd, error) { if !f.valid { return -1, errBadFilter } - - // Ignore error, if not supported returns apiLevel == 0 - apiLevel, _ := GetAPI() - if apiLevel < 6 { - return -1, fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + if err := notifSupported(); err != nil { + return -1, err } fd := C.seccomp_notify_fd(f.filterCtx) @@ -750,10 +785,8 @@ func notifReceive(fd ScmpFd) (*ScmpNotifReq, error) { var req *C.struct_seccomp_notif var resp *C.struct_seccomp_notif_resp - // Ignore error, if not supported returns apiLevel == 0 - apiLevel, _ := GetAPI() - if apiLevel < 6 { - return nil, fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + if err := notifSupported(); err != nil { + return nil, err } // we only use the request here; the response is unused @@ -789,13 +822,11 @@ func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error { var req *C.struct_seccomp_notif var resp *C.struct_seccomp_notif_resp - // Ignore error, if not supported returns apiLevel == 0 - apiLevel, _ := GetAPI() - if apiLevel < 6 { - return fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + if err := notifSupported(); err != nil { + return err } - // we only use the reponse here; the request is discarded + // we only use the response here; the request is discarded if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 { return errRc(retCode) } @@ -827,10 +858,8 @@ func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error { } func notifIDValid(fd ScmpFd, id uint64) error { - // Ignore error, if not supported returns apiLevel == 0 - apiLevel, _ := GetAPI() - if apiLevel < 6 { - return fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + if err := notifSupported(); err != nil { + return err } for { diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md new file mode 100644 index 00000000000..dea3e409e11 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md @@ -0,0 +1,29 @@ +# LICENSE + +Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go new file mode 100644 index 00000000000..d6f667378a2 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go @@ -0,0 +1,72 @@ +// Copyright (c) 2021-2022, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +var ( + hdrArchUnknown archType = [...]byte{'0', '0', '\x00'} + hdrArch386 archType = [...]byte{'0', '1', '\x00'} + hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'} + hdrArchARM archType = [...]byte{'0', '3', '\x00'} + hdrArchARM64 archType = [...]byte{'0', '4', '\x00'} + hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'} + hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'} + hdrArchMIPS archType = [...]byte{'0', '7', '\x00'} + hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'} + hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'} + hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'} + hdrArchS390x archType = [...]byte{'1', '1', '\x00'} + hdrArchRISCV64 archType = [...]byte{'1', '2', '\x00'} +) + +type archType [3]byte + +// getSIFArch returns the archType corresponding to go runtime arch. +func getSIFArch(arch string) archType { + archMap := map[string]archType{ + "386": hdrArch386, + "amd64": hdrArchAMD64, + "arm": hdrArchARM, + "arm64": hdrArchARM64, + "ppc64": hdrArchPPC64, + "ppc64le": hdrArchPPC64le, + "mips": hdrArchMIPS, + "mipsle": hdrArchMIPSle, + "mips64": hdrArchMIPS64, + "mips64le": hdrArchMIPS64le, + "s390x": hdrArchS390x, + "riscv64": hdrArchRISCV64, + } + + t, ok := archMap[arch] + if !ok { + return hdrArchUnknown + } + return t +} + +// GoArch returns the go runtime arch corresponding to t. +func (t archType) GoArch() string { + archMap := map[archType]string{ + hdrArch386: "386", + hdrArchAMD64: "amd64", + hdrArchARM: "arm", + hdrArchARM64: "arm64", + hdrArchPPC64: "ppc64", + hdrArchPPC64le: "ppc64le", + hdrArchMIPS: "mips", + hdrArchMIPSle: "mipsle", + hdrArchMIPS64: "mips64", + hdrArchMIPS64le: "mips64le", + hdrArchS390x: "s390x", + hdrArchRISCV64: "riscv64", + } + + arch, ok := archMap[t] + if !ok { + arch = "unknown" + } + return arch +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go new file mode 100644 index 00000000000..d706fb1a59c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go @@ -0,0 +1,103 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. 
Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "io" +) + +// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The +// zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte + pos int64 +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +var errNegativeOffset = errors.New("negative offset") + +// ReadAt implements the io.ReaderAt interface. +func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errNegativeOffset + } + + if off >= int64(len(b.buf)) { + return 0, io.EOF + } + + n = copy(p, b.buf[off:]) + if n < len(p) { + err = io.EOF + } + return n, err +} + +var errNegativePosition = errors.New("negative position") + +// Write implements the io.Writer interface. +func (b *Buffer) Write(p []byte) (n int, err error) { + if b.pos < 0 { + return 0, errNegativePosition + } + + if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need { + b.buf = append(b.buf, make([]byte, need-have)...) + } + + n = copy(b.buf[b.pos:], p) + b.pos += int64(n) + return n, nil +} + +var errInvalidWhence = errors.New("invalid whence") + +// Seek implements the io.Seeker interface. +func (b *Buffer) Seek(offset int64, whence int) (int64, error) { + var abs int64 + + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = b.pos + offset + case io.SeekEnd: + abs = int64(len(b.buf)) + offset + default: + return 0, errInvalidWhence + } + + if abs < 0 { + return 0, errNegativePosition + } + + b.pos = abs + return abs, nil +} + +var errTruncateRange = errors.New("truncation out of range") + +// Truncate discards all but the first n bytes from the buffer. +func (b *Buffer) Truncate(n int64) error { + if n < 0 || n > int64(len(b.buf)) { + return errTruncateRange + } + + b.buf = b.buf[:n] + return nil +} + +// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer +// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate). +func (b *Buffer) Bytes() []byte { return b.buf } + +// Len returns the number of bytes in the buffer. +func (b *Buffer) Len() int64 { return int64(len(b.buf)) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go new file mode 100644 index 00000000000..e65bdb7476d --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -0,0 +1,680 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/google/uuid" +) + +// nextAligned finds the next offset that satisfies alignment. 
+func nextAligned(offset int64, alignment int) int64 { + align64 := uint64(alignment) + offset64 := uint64(offset) + + if align64 != 0 && offset64%align64 != 0 { + offset64 = (offset64 & ^(align64 - 1)) + align64 + } + + return int64(offset64) +} + +// writeDataObjectAt writes the data object described by di to ws, using time t, recording details +// in d. The object is written at the first position that satisfies the alignment requirements +// described by di following offsetUnaligned. +func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll + offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + if err != nil { + return err + } + + n, err := io.Copy(ws, di.r) + if err != nil { + return err + } + + if err := di.fillDescriptor(t, d); err != nil { + return err + } + d.Used = true + d.Offset = offset + d.Size = n + d.SizeWithPadding = offset - offsetUnaligned + n + + return nil +} + +var ( + errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") + errPrimaryPartition = errors.New("image already contains a primary partition") +) + +// writeDataObject writes the data object described by di to f, using time t, recording details in +// the descriptor at index i. +func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error { + if i >= len(f.rds) { + return errInsufficientCapacity + } + + // If this is a primary partition, verify there isn't another primary partition, and update the + // architecture in the global header. + if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys { + if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 { + return errPrimaryPartition + } + + f.h.Arch = p.Arch + } + + d := &f.rds[i] + d.ID = uint32(i) + 1 + + if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil { + return err + } + + // Update minimum object ID map. + if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID { + f.minIDs[d.GroupID] = d.ID + } + + f.h.DescriptorsFree-- + f.h.DataSize += d.SizeWithPadding + + return nil +} + +// writeDescriptors writes the descriptors in f to backing storage. +func (f *FileImage) writeDescriptors() error { + if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.rds) +} + +// writeHeader writes the global header in f to backing storage. +func (f *FileImage) writeHeader() error { + if _, err := f.rw.Seek(0, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.h) +} + +// createOpts accumulates container creation options. +type createOpts struct { + launchScript [hdrLaunchLen]byte + id uuid.UUID + descriptorsOffset int64 + descriptorCapacity int64 + dis []DescriptorInput + t time.Time + closeOnUnload bool +} + +// CreateOpt are used to specify container creation options. +type CreateOpt func(*createOpts) error + +var errLaunchScriptLen = errors.New("launch script too large") + +// OptCreateWithLaunchScript specifies s as the launch script.
+func OptCreateWithLaunchScript(s string) CreateOpt { + return func(co *createOpts) error { + b := []byte(s) + + if len(b) >= len(co.launchScript) { + return errLaunchScriptLen + } + + copy(co.launchScript[:], b) + + return nil + } +} + +// OptCreateDeterministic sets header/descriptor fields to values that support deterministic +// creation of images. +func OptCreateDeterministic() CreateOpt { + return func(co *createOpts) error { + co.id = uuid.Nil + co.t = time.Time{} + return nil + } +} + +// OptCreateWithID specifies id as the unique ID. +func OptCreateWithID(id string) CreateOpt { + return func(co *createOpts) error { + id, err := uuid.Parse(id) + co.id = id + return err + } +} + +// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a +// maximum of n descriptors. +func OptCreateWithDescriptorCapacity(n int64) CreateOpt { + return func(co *createOpts) error { + co.descriptorCapacity = n + return nil + } +} + +// OptCreateWithDescriptors appends dis to the list of descriptors. +func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt { + return func(co *createOpts) error { + co.dis = append(co.dis, dis...) + return nil + } +} + +// OptCreateWithTime specifies t as the image creation time. +func OptCreateWithTime(t time.Time) CreateOpt { + return func(co *createOpts) error { + co.t = t + return nil + } +} + +// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptCreateWithCloseOnUnload(b bool) CreateOpt { + return func(co *createOpts) error { + co.closeOnUnload = b + return nil + } +} + +// createContainer creates a new SIF container file in rw, according to opts. +func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + rds := make([]rawDescriptor, co.descriptorCapacity) + rdsSize := int64(binary.Size(rds)) + + h := header{ + LaunchScript: co.launchScript, + Magic: hdrMagic, + Version: CurrentVersion.bytes(), + Arch: hdrArchUnknown, + ID: co.id, + CreatedAt: co.t.Unix(), + ModifiedAt: co.t.Unix(), + DescriptorsFree: co.descriptorCapacity, + DescriptorsTotal: co.descriptorCapacity, + DescriptorsOffset: co.descriptorsOffset, + DescriptorsSize: rdsSize, + DataOffset: co.descriptorsOffset + rdsSize, + } + + f := &FileImage{ + rw: rw, + h: h, + rds: rds, + minIDs: make(map[uint32]uint32), + } + + for i, di := range co.dis { + if err := f.writeDataObject(i, di, co.t); err != nil { + return nil, err + } + } + + if err := f.writeDescriptors(); err != nil { + return nil, err + } + + if err := f.writeHeader(); err != nil { + return nil, err + } + + return f, nil +} + +// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects +// can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptCreateWithCloseOnUnload. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. 
To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) { + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + + co := createOpts{ + id: id, + descriptorsOffset: 4096, + descriptorCapacity: 48, + t: time.Now(), + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&co); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := createContainer(rw, co) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = co.closeOnUnload + return f, nil +} + +// CreateContainerAtPath creates a new SIF container file at path, according to opts. One or more +// data objects can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { + fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := CreateContainer(fp, opts...) + if err != nil { + fp.Close() + os.Remove(fp.Name()) + + return nil, err + } + + f.closeOnUnload = true + return f, nil +} + +func zeroData(fimg *FileImage, descr *rawDescriptor) error { + // first, move to data object offset + if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil { + return err + } + + var zero [4096]byte + n := descr.Size + upbound := int64(4096) + for { + if n < 4096 { + upbound = n + } + + if _, err := fimg.rw.Write(zero[:upbound]); err != nil { + return err + } + n -= 4096 + if n <= 0 { + break + } + } + + return nil +} + +func resetDescriptor(fimg *FileImage, index int) error { + // If we remove the primary partition, set the global header Arch field to HdrArchUnknown + // to indicate that the SIF file doesn't include a primary partition and no dependency + // on any architecture exists. + if fimg.rds[index].isPartitionOfType(PartPrimSys) { + fimg.h.Arch = hdrArchUnknown + } + + offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0])) + + // first, move to descriptor offset + if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil { + return err + } + + var emptyDesc rawDescriptor + return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc) +} + +// addOpts accumulates object add options. +type addOpts struct { + t time.Time +} + +// AddOpt are used to specify object add options. +type AddOpt func(*addOpts) error + +// OptAddDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptAddDeterministic() AddOpt { + return func(ao *addOpts) error { + ao.t = time.Time{} + return nil + } +} + +// OptAddWithTime specifies t as the image modification time. 
+func OptAddWithTime(t time.Time) AddOpt {
+	return func(ao *addOpts) error {
+		ao.t = t
+		return nil
+	}
+}
+
+// AddObject adds a new data object and its descriptor into the specified SIF file.
+//
+// By default, the image modification time is set to the current time. To override this, consider
+// using OptAddDeterministic or OptAddWithTime.
+func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
+	ao := addOpts{
+		t: time.Now(),
+	}
+
+	for _, opt := range opts {
+		if err := opt(&ao); err != nil {
+			return fmt.Errorf("%w", err)
+		}
+	}
+
+	// Find an unused descriptor.
+	i := 0
+	for _, rd := range f.rds {
+		if !rd.Used {
+			break
+		}
+		i++
+	}
+
+	if err := f.writeDataObject(i, di, ao.t); err != nil {
+		return fmt.Errorf("%w", err)
+	}
+
+	if err := f.writeDescriptors(); err != nil {
+		return fmt.Errorf("%w", err)
+	}
+
+	f.h.ModifiedAt = ao.t.Unix()
+
+	if err := f.writeHeader(); err != nil {
+		return fmt.Errorf("%w", err)
+	}
+
+	return nil
+}
+
+// isLast returns true if the data object associated with d is the last in f.
+func (f *FileImage) isLast(d *rawDescriptor) bool {
+	isLast := true
+
+	end := d.Offset + d.Size
+	f.WithDescriptors(func(d Descriptor) bool {
+		isLast = d.Offset()+d.Size() <= end
+		return !isLast
+	})
+
+	return isLast
+}
+
+// truncateAt truncates f at the start of the padded data object described by d.
+func (f *FileImage) truncateAt(d *rawDescriptor) error {
+	start := d.Offset + d.Size - d.SizeWithPadding
+
+	if err := f.rw.Truncate(start); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// deleteOpts accumulates object deletion options.
+type deleteOpts struct {
+	zero    bool
+	compact bool
+	t       time.Time
+}
+
+// DeleteOpt are used to specify object deletion options.
+type DeleteOpt func(*deleteOpts) error
+
+// OptDeleteZero specifies whether the deleted object should be zeroed.
+func OptDeleteZero(b bool) DeleteOpt {
+	return func(do *deleteOpts) error {
+		do.zero = b
+		return nil
+	}
+}
+
+// OptDeleteCompact specifies whether the image should be compacted following object deletion.
+func OptDeleteCompact(b bool) DeleteOpt {
+	return func(do *deleteOpts) error {
+		do.compact = b
+		return nil
+	}
+}
+
+// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptDeleteDeterministic() DeleteOpt {
+	return func(do *deleteOpts) error {
+		do.t = time.Time{}
+		return nil
+	}
+}
+
+// OptDeleteWithTime specifies t as the image modification time.
+func OptDeleteWithTime(t time.Time) DeleteOpt {
+	return func(do *deleteOpts) error {
+		do.t = t
+		return nil
+	}
+}
+
+var errCompactNotImplemented = errors.New("compact not implemented for non-last object")
+
+// DeleteObject deletes the data object with id, according to opts.
+//
+// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
+// object deletion, use OptDeleteCompact.
+//
+// By default, the image modification time is set to time.Now(). To override this, consider using
+// OptDeleteDeterministic or OptDeleteWithTime.
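+//
+// A hedged example: deleting object ID 1, zeroing its data region and, when
+// it is the last object in the image, compacting the file (illustrative only;
+// compaction of non-last objects returns an error as noted above):
+//
+//	err := f.DeleteObject(1, OptDeleteZero(true), OptDeleteCompact(true))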
+func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { + do := deleteOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&do); err != nil { + return fmt.Errorf("%w", err) + } + } + + d, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if do.compact && !f.isLast(d) { + return fmt.Errorf("%w", errCompactNotImplemented) + } + + if do.zero { + if err := zeroData(f, d); err != nil { + return fmt.Errorf("%w", err) + } + } + + if do.compact { + if err := f.truncateAt(d); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.DataSize -= d.SizeWithPadding + } + + f.h.DescriptorsFree++ + f.h.ModifiedAt = do.t.Unix() + + index := 0 + for i, od := range f.rds { + if od.ID == id { + index = i + break + } + } + + if err := resetDescriptor(f, index); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// setOpts accumulates object set options. +type setOpts struct { + t time.Time +} + +// SetOpt are used to specify object set options. +type SetOpt func(*setOpts) error + +// OptSetDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptSetDeterministic() SetOpt { + return func(so *setOpts) error { + so.t = time.Time{} + return nil + } +} + +// OptSetWithTime specifies t as the image/object modification time. +func OptSetWithTime(t time.Time) SetOpt { + return func(so *setOpts) error { + so.t = t + return nil + } +} + +var ( + errNotPartition = errors.New("data object not a partition") + errNotSystem = errors.New("data object not a system partition") +) + +// SetPrimPart sets the specified system partition to be the primary one. +// +// By default, the image/object modification times are set to time.Now(). To override this, +// consider using OptSetDeterministic or OptSetWithTime. 
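+//
+// For illustration only, assuming object ID 2 holds a system partition:
+//
+//	err := f.SetPrimPart(2)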
+func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { + so := setOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + descr, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if descr.DataType != DataPartition { + return fmt.Errorf("%w", errNotPartition) + } + + fs, pt, arch, err := descr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + // if already primary system partition, nothing to do + if pt == PartPrimSys { + return nil + } + + if pt != PartSystem { + return fmt.Errorf("%w", errNotSystem) + } + + olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) + if err != nil && !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) + } + + f.h.Arch = getSIFArch(arch) + + extra := partition{ + Fstype: fs, + Parttype: PartPrimSys, + } + copy(extra.Arch[:], arch) + + if err := descr.setExtra(extra); err != nil { + return fmt.Errorf("%w", err) + } + + if olddescr != nil { + oldfs, _, oldarch, err := olddescr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + oldextra := partition{ + Fstype: oldfs, + Parttype: PartSystem, + Arch: getSIFArch(oldarch), + } + + if err := olddescr.setExtra(oldextra); err != nil { + return fmt.Errorf("%w", err) + } + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go new file mode 100644 index 00000000000..da7a6a7c7be --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -0,0 +1,267 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "bytes" + "crypto" + "encoding/binary" + "errors" + "fmt" + "io" + "strings" + "time" +) + +// rawDescriptor represents an on-disk object descriptor. +type rawDescriptor struct { + DataType DataType + Used bool + ID uint32 + GroupID uint32 + LinkedID uint32 + Offset int64 + Size int64 + SizeWithPadding int64 + + CreatedAt int64 + ModifiedAt int64 + UID int64 // Deprecated: UID exists for historical compatibility and should not be used. + GID int64 // Deprecated: GID exists for historical compatibility and should not be used. + Name [descrNameLen]byte + Extra [descrMaxPrivLen]byte +} + +// partition represents the SIF partition data object descriptor. +type partition struct { + Fstype FSType + Parttype PartType + Arch archType +} + +// signature represents the SIF signature data object descriptor. +type signature struct { + Hashtype hashType + Entity [descrEntityLen]byte +} + +// cryptoMessage represents the SIF crypto message object descriptor. +type cryptoMessage struct { + Formattype FormatType + Messagetype MessageType +} + +var errNameTooLarge = errors.New("name value too large") + +// setName encodes name into the name field of d. 
+func (d *rawDescriptor) setName(name string) error { + if len(name) > len(d.Name) { + return errNameTooLarge + } + + for i := copy(d.Name[:], name); i < len(d.Name); i++ { + d.Name[i] = 0 + } + + return nil +} + +var errExtraTooLarge = errors.New("extra value too large") + +// setExtra encodes v into the extra field of d. +func (d *rawDescriptor) setExtra(v interface{}) error { + if v == nil { + return nil + } + + if binary.Size(v) > len(d.Extra) { + return errExtraTooLarge + } + + b := new(bytes.Buffer) + if err := binary.Write(b, binary.LittleEndian, v); err != nil { + return err + } + + for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ { + d.Extra[i] = 0 + } + + return nil +} + +// getPartitionMetadata gets metadata for a partition data object. +func (d rawDescriptor) getPartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + if got, want := d.DataType, DataPartition; got != want { + return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}} + } + + var p partition + + b := bytes.NewReader(d.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &p); err != nil { + return 0, 0, "", fmt.Errorf("%w", err) + } + + return p.Fstype, p.Parttype, p.Arch.GoArch(), nil +} + +// isPartitionOfType returns true if d is a partition data object of type pt. +func (d rawDescriptor) isPartitionOfType(pt PartType) bool { + _, t, _, err := d.getPartitionMetadata() + if err != nil { + return false + } + return t == pt +} + +// Descriptor represents the SIF descriptor type. +type Descriptor struct { + r io.ReaderAt // Backing storage. + + raw rawDescriptor // Raw descriptor from image. + + relativeID uint32 // ID relative to minimum ID of object group. +} + +// DataType returns the type of data object. +func (d Descriptor) DataType() DataType { return d.raw.DataType } + +// ID returns the data object ID of d. +func (d Descriptor) ID() uint32 { return d.raw.ID } + +// GroupID returns the data object group ID of d, or zero if d is not part of a data object +// group. +func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask } + +// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked +// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a +// data object ID. +func (d Descriptor) LinkedID() (id uint32, isGroup bool) { + return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask +} + +// Offset returns the offset of the data object. +func (d Descriptor) Offset() int64 { return d.raw.Offset } + +// Size returns the data object size. +func (d Descriptor) Size() int64 { return d.raw.Size } + +// CreatedAt returns the creation time of the data object. +func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) } + +// ModifiedAt returns the modification time of the data object. +func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) } + +// Name returns the name of the data object. +func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") } + +// PartitionMetadata gets metadata for a partition data object. +func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + return d.raw.getPartitionMetadata() +} + +var errHashUnsupported = errors.New("hash algorithm unsupported") + +// getHashType converts ht into a crypto.Hash. 
+func getHashType(ht hashType) (crypto.Hash, error) { + switch ht { + case hashSHA256: + return crypto.SHA256, nil + case hashSHA384: + return crypto.SHA384, nil + case hashSHA512: + return crypto.SHA512, nil + case hashBLAKE2S: + return crypto.BLAKE2s_256, nil + case hashBLAKE2B: + return crypto.BLAKE2b_256, nil + } + return 0, errHashUnsupported +} + +// SignatureMetadata gets metadata for a signature data object. +func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { + if got, want := d.raw.DataType, DataSignature; got != want { + return ht, fp, &unexpectedDataTypeError{got, []DataType{want}} + } + + var s signature + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &s); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + if ht, err = getHashType(s.Hashtype); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + fp = make([]byte, 20) + copy(fp, s.Entity[:]) + + return ht, fp, nil +} + +// CryptoMessageMetadata gets metadata for a crypto message data object. +func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) { + if got, want := d.raw.DataType, DataCryptoMessage; got != want { + return 0, 0, &unexpectedDataTypeError{got, []DataType{want}} + } + + var m cryptoMessage + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &m); err != nil { + return 0, 0, fmt.Errorf("%w", err) + } + + return m.Formattype, m.Messagetype, nil +} + +// GetData returns the data object associated with descriptor d. +func (d Descriptor) GetData() ([]byte, error) { + b := make([]byte, d.raw.Size) + if _, err := io.ReadFull(d.GetReader(), b); err != nil { + return nil, err + } + return b, nil +} + +// GetReader returns a io.Reader that reads the data object associated with descriptor d. +func (d Descriptor) GetReader() io.Reader { + return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size) +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d. +func (d Descriptor) GetIntegrityReader() io.Reader { + fields := []interface{}{ + d.raw.DataType, + d.raw.Used, + d.relativeID, + d.raw.LinkedID, + d.raw.Size, + d.raw.CreatedAt, + d.raw.UID, + d.raw.GID, + } + + // Encode endian-sensitive fields. + data := bytes.Buffer{} + for _, f := range fields { + if err := binary.Write(&data, binary.LittleEndian, f); err != nil { + panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error. + } + } + + return io.MultiReader( + &data, + bytes.NewReader(d.raw.Name[:]), + bytes.NewReader(d.raw.Extra[:]), + ) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go new file mode 100644 index 00000000000..c55cf51f9de --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -0,0 +1,300 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "crypto" + "errors" + "fmt" + "io" + "os" + "time" +) + +// descriptorOpts accumulates data object options. +type descriptorOpts struct { + groupID uint32 + linkID uint32 + alignment int + name string + extra interface{} + t time.Time +} + +// DescriptorInputOpt are used to specify data object options. 
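+//
+// A non-normative sketch of use, assuming r is an io.Reader supplying JSON
+// and the object name is chosen for the example only:
+//
+//	di, err := NewDescriptorInput(DataGenericJSON, r, OptObjectName("labels.json"))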
+type DescriptorInputOpt func(DataType, *descriptorOpts) error
+
+// OptNoGroup specifies the data object is not contained within a data object group.
+func OptNoGroup() DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.groupID = 0
+		return nil
+	}
+}
+
+// OptGroupID specifies groupID as data object group ID.
+func OptGroupID(groupID uint32) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		if groupID == 0 {
+			return ErrInvalidGroupID
+		}
+		opts.groupID = groupID
+		return nil
+	}
+}
+
+// OptLinkedID specifies that the data object is linked to the data object with the specified ID.
+func OptLinkedID(id uint32) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		if id == 0 {
+			return ErrInvalidObjectID
+		}
+		opts.linkID = id
+		return nil
+	}
+}
+
+// OptLinkedGroupID specifies that the data object is linked to the data object group with the
+// specified groupID.
+func OptLinkedGroupID(groupID uint32) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		if groupID == 0 {
+			return ErrInvalidGroupID
+		}
+		opts.linkID = groupID | descrGroupMask
+		return nil
+	}
+}
+
+// OptObjectAlignment specifies n as the data alignment requirement.
+func OptObjectAlignment(n int) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.alignment = n
+		return nil
+	}
+}
+
+// OptObjectName specifies name as the data object name.
+func OptObjectName(name string) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.name = name
+		return nil
+	}
+}
+
+// OptObjectTime specifies t as the data object creation time.
+func OptObjectTime(t time.Time) DescriptorInputOpt {
+	return func(_ DataType, opts *descriptorOpts) error {
+		opts.t = t
+		return nil
+	}
+}
+
+type unexpectedDataTypeError struct {
+	got  DataType
+	want []DataType
+}
+
+func (e *unexpectedDataTypeError) Error() string {
+	return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want)
+}
+
+func (e *unexpectedDataTypeError) Is(target error) bool {
+	//nolint:errorlint // don't compare wrapped errors in Is()
+	t, ok := target.(*unexpectedDataTypeError)
+	if !ok {
+		return false
+	}
+
+	if len(t.want) > 0 {
+		// Use a map to check that the "want" errors in e and t contain the same values, ignoring
+		// any ordering differences.
+		acc := make(map[DataType]int, len(t.want))
+
+		// Increment counter for each data type in e.
+		for _, dt := range e.want {
+			if _, ok := acc[dt]; !ok {
+				acc[dt] = 0
+			}
+			acc[dt]++
+		}
+
+		// Decrement counter for each data type in t.
+		for _, dt := range t.want {
+			if _, ok := acc[dt]; !ok {
+				return false
+			}
+			acc[dt]--
+		}
+
+		// If the "want" errors in e and t are equivalent, all counters should be zero.
+		for _, n := range acc {
+			if n != 0 {
+				return false
+			}
+		}
+	}
+
+	return (e.got == t.got || t.got == 0)
+}
+
+// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set
+// to ft, and the message type is set to mt.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
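+//
+// Illustrative use (values chosen for the example only; r is an assumed
+// io.Reader supplying the message body):
+//
+//	di, err := NewDescriptorInput(DataCryptoMessage, r,
+//		OptCryptoMessageMetadata(FormatPEM, MessageRSAOAEP),
+//	)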
+func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataCryptoMessage; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + m := cryptoMessage{ + Formattype: ft, + Messagetype: mt, + } + + opts.extra = m + return nil + } +} + +var errUnknownArchitcture = errors.New("unknown architecture") + +// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to +// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch +// should be the architecture as represented by the Go runtime. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataPartition; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + sifarch := getSIFArch(arch) + if sifarch == hdrArchUnknown { + return fmt.Errorf("%w: %v", errUnknownArchitcture, arch) + } + + p := partition{ + Fstype: fs, + Parttype: pt, + Arch: sifarch, + } + + opts.extra = p + return nil + } +} + +// sifHashType converts h into a HashType. +func sifHashType(h crypto.Hash) hashType { + switch h { + case crypto.SHA256: + return hashSHA256 + case crypto.SHA384: + return hashSHA384 + case crypto.SHA512: + return hashSHA512 + case crypto.BLAKE2s_256: + return hashBLAKE2S + case crypto.BLAKE2b_256: + return hashBLAKE2B + } + return 0 +} + +// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and +// the signing entity fingerprint is set to fp. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataSignature; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + s := signature{ + Hashtype: sifHashType(ht), + } + copy(s.Entity[:], fp) + + opts.extra = s + return nil + } +} + +// DescriptorInput describes a new data object. +type DescriptorInput struct { + dt DataType + r io.Reader + opts descriptorOpts +} + +// DefaultObjectGroup is the default group that data objects are placed in. +const DefaultObjectGroup = 1 + +// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents +// read from r, configured according to opts. +// +// It is possible (and often necessary) to store additional metadata related to certain types of +// data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata, +// and OptSignatureMetadata for this purpose. +// +// By default, the data object will be placed in the default data object group (1). To override +// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or +// OptLinkedGroupID. +// +// By default, the data object will be aligned according to the system's memory page size. To +// override this behavior, consider using OptObjectAlignment. +// +// By default, no name is set for data object. To set a name, use OptObjectName. +// +// When creating a new image, data object creation/modification times are set to the image creation +// time. 
When modifying an existing image, the data object creation/modification time is set to the +// image modification time. To override this behavior, consider using OptObjectTime. +func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) { + dopts := descriptorOpts{ + groupID: DefaultObjectGroup, + alignment: os.Getpagesize(), + } + + for _, opt := range opts { + if err := opt(t, &dopts); err != nil { + return DescriptorInput{}, fmt.Errorf("%w", err) + } + } + + di := DescriptorInput{ + dt: t, + r: r, + opts: dopts, + } + + return di, nil +} + +// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t. +func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error { + d.DataType = di.dt + d.GroupID = di.opts.groupID | descrGroupMask + d.LinkedID = di.opts.linkID + + if !di.opts.t.IsZero() { + t = di.opts.t + } + d.CreatedAt = t.Unix() + d.ModifiedAt = t.Unix() + + d.UID = 0 + d.GID = 0 + + if err := d.setName(di.opts.name); err != nil { + return err + } + + return d.setExtra(di.opts.extra) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go new file mode 100644 index 00000000000..75266e1945c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go @@ -0,0 +1,174 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" +) + +var ( + errInvalidMagic = errors.New("invalid SIF magic") + errIncompatibleVersion = errors.New("incompatible SIF version") +) + +// isValidSif looks at key fields from the global header to assess SIF validity. +func isValidSif(f *FileImage) error { + if f.h.Magic != hdrMagic { + return errInvalidMagic + } + + if f.h.Version != CurrentVersion.bytes() { + return errIncompatibleVersion + } + + return nil +} + +// populateMinIDs populates the minIDs field of f. +func (f *FileImage) populateMinIDs() { + f.minIDs = make(map[uint32]uint32) + f.WithDescriptors(func(d Descriptor) bool { + if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID { + f.minIDs[d.raw.GroupID] = d.ID() + } + return false + }) +} + +// loadContainer loads a SIF image from rw. +func loadContainer(rw ReadWriter) (*FileImage, error) { + f := FileImage{rw: rw} + + // Read global header. + err := binary.Read( + io.NewSectionReader(rw, 0, int64(binary.Size(f.h))), + binary.LittleEndian, + &f.h, + ) + if err != nil { + return nil, fmt.Errorf("reading global header: %w", err) + } + + if err := isValidSif(&f); err != nil { + return nil, err + } + + // Read descriptors. + f.rds = make([]rawDescriptor, f.h.DescriptorsTotal) + err = binary.Read( + io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize), + binary.LittleEndian, + &f.rds, + ) + if err != nil { + return nil, fmt.Errorf("reading descriptors: %w", err) + } + + f.populateMinIDs() + + return &f, nil +} + +// loadOpts accumulates container loading options. +type loadOpts struct { + flag int + closeOnUnload bool +} + +// LoadOpt are used to specify container loading options. 
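+//
+// A hedged example: opening an image read-only, with the path assumed for
+// illustration:
+//
+//	f, err := LoadContainerFromPath("/tmp/image.sif", OptLoadWithFlag(os.O_RDONLY))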
+type LoadOpt func(*loadOpts) error + +// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file. +func OptLoadWithFlag(flag int) LoadOpt { + return func(lo *loadOpts) error { + lo.flag = flag + return nil + } +} + +// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptLoadWithCloseOnUnload(b bool) LoadOpt { + return func(lo *loadOpts) error { + lo.closeOnUnload = b + return nil + } +} + +// LoadContainerFromPath loads a new SIF container from path, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the file is opened for read and write access. To change this behavior, consider +// using OptLoadWithFlag. +func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + flag: os.O_RDWR, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + fp, err := os.OpenFile(path, lo.flag, 0) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := loadContainer(fp) + if err != nil { + fp.Close() + + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = true + return f, nil +} + +// LoadContainer loads a new SIF container from rw, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptLoadWithCloseOnUnload. +func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := loadContainer(rw) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = lo.closeOnUnload + return f, nil +} + +// UnloadContainer unloads f, releasing associated resources. +func (f *FileImage) UnloadContainer() error { + if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload { + if err := c.Close(); err != nil { + return fmt.Errorf("%w", err) + } + } + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go new file mode 100644 index 00000000000..635d6e89cfb --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -0,0 +1,210 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "fmt" +) + +// ErrNoObjects is the error returned when an image contains no data objects. +var ErrNoObjects = errors.New("no objects in image") + +// ErrObjectNotFound is the error returned when a data object is not found. +var ErrObjectNotFound = errors.New("object not found") + +// ErrMultipleObjectsFound is the error returned when multiple data objects are found. +var ErrMultipleObjectsFound = errors.New("multiple objects found") + +// ErrInvalidObjectID is the error returned when an invalid object ID is supplied. 
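+//
+// Callers can test for it with errors.Is (a sketch, not upstream documentation):
+//
+//	if errors.Is(err, ErrInvalidObjectID) {
+//		// handle the invalid ID
+//	}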
+var ErrInvalidObjectID = errors.New("invalid object ID") + +// ErrInvalidGroupID is the error returned when an invalid group ID is supplied. +var ErrInvalidGroupID = errors.New("invalid group ID") + +// DescriptorSelectorFunc returns true if d matches, and false otherwise. +type DescriptorSelectorFunc func(d Descriptor) (bool, error) + +// WithDataType selects descriptors that have data type dt. +func WithDataType(dt DataType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.DataType() == dt, nil + } +} + +// WithID selects descriptors with a matching ID. +func WithID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + return d.ID() == id, nil + } +} + +// WithNoGroup selects descriptors that are not contained within an object group. +func WithNoGroup() DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.GroupID() == 0, nil + } +} + +// WithGroupID returns a selector func that selects descriptors with a matching groupID. +func WithGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + return d.GroupID() == groupID, nil + } +} + +// WithLinkedID selects descriptors that are linked to the data object with specified ID. +func WithLinkedID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + linkedID, isGroup := d.LinkedID() + return !isGroup && linkedID == id, nil + } +} + +// WithLinkedGroupID selects descriptors that are linked to the data object group with specified +// ID. +func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + linkedID, isGroup := d.LinkedID() + return isGroup && linkedID == groupID, nil + } +} + +// WithPartitionType selects descriptors containing a partition of type pt. +func WithPartitionType(pt PartType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.raw.isPartitionOfType(pt), nil + } +} + +// descriptorFromRaw populates a Descriptor from rd. +func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor { + return Descriptor{ + raw: *rd, + r: f.rw, + relativeID: rd.ID - f.minIDs[rd.GroupID], + } +} + +// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true. +// If the image contains no data objects, an error wrapping ErrNoObjects is returned. +func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return nil, fmt.Errorf("%w", ErrNoObjects) + } + + var ds []Descriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error { + ds = append(ds, f.descriptorFromRaw(d)) + return nil + }) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + return ds, nil +} + +// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is +// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns, +// ErrMultipleObjectsFound is returned. 
+func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) { + var d *rawDescriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error { + if d != nil { + return ErrMultipleObjectsFound + } + d = found + return nil + }) + + if err == nil && d == nil { + err = ErrObjectNotFound + } + + return d, err +} + +// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data +// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an +// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an +// error wrapping ErrMultipleObjectsFound is returned. +func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return Descriptor{}, fmt.Errorf("%w", ErrNoObjects) + } + + d, err := f.getDescriptor(fns...) + if err != nil { + return Descriptor{}, fmt.Errorf("%w", err) + } + + return f.descriptorFromRaw(d), nil +} + +// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns +// select the descriptor. +func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + for _, fn := range fns { + if ok, err := fn(d); !ok || err != nil { + return ok, err + } + } + return true, nil + } +} + +// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns +// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is +// returned to the caller. +func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error { + for i, d := range f.rds { + if !d.Used { + continue + } + + if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil { + return err + } else if !ok { + continue + } + + if err := onMatchFn(&f.rds[i]); err != nil { + return err + } + } + + return nil +} + +var errAbort = errors.New("abort") + +// abortOnMatch is a semantic convenience function that always returns a non-nil error, which can +// be used as a no-op matchFn. +func abortOnMatch(*rawDescriptor) error { return errAbort } + +// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true. +func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) { + selectFn := func(d Descriptor) (bool, error) { + return fn(d), nil + } + _ = f.withDescriptors(selectFn, abortOnMatch) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go new file mode 100644 index 00000000000..704acee4a0e --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -0,0 +1,364 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +// Package sif implements data structures and routines to create +// and access SIF files. +// +// Layout of a SIF file (example): +// +// .================================================. +// | GLOBAL HEADER: Sifheader | +// | - launch: "#!/usr/bin/env..." 
| +// | - magic: "SIF_MAGIC" | +// | - version: "1" | +// | - arch: "4" | +// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e | +// | - ctime: 1504657553 | +// | - mtime: 1504657653 | +// | - ndescr: 3 | +// | - descroff: 120 | --. +// | - descrlen: 432 | | +// | - dataoff: 4096 | | +// | - datalen: 619362 | | +// |------------------------------------------------| <-' +// | DESCR[0]: Sifdeffile | +// | - Sifcommon | +// | - datatype: DATA_DEFFILE | +// | - id: 1 | +// | - groupid: 1 | +// | - link: NONE | +// | - fileoff: 4096 | --. +// | - filelen: 222 | | +// |------------------------------------------------| <-----. +// | DESCR[1]: Sifpartition | | | +// | - Sifcommon | | | +// | - datatype: DATA_PARTITION | | | +// | - id: 2 | | | +// | - groupid: 1 | | | +// | - link: NONE | | | +// | - fileoff: 4318 | ----. | +// | - filelen: 618496 | | | | +// | - fstype: Squashfs | | | | +// | - parttype: System | | | | +// | - content: Linux | | | | +// |------------------------------------------------| | | | +// | DESCR[2]: Sifsignature | | | | +// | - Sifcommon | | | | +// | - datatype: DATA_SIGNATURE | | | | +// | - id: 3 | | | | +// | - groupid: NONE | | | | +// | - link: 2 | ------' +// | - fileoff: 622814 | ------. +// | - filelen: 644 | | | | +// | - hashtype: SHA384 | | | | +// | - entity: @ | | | | +// |------------------------------------------------| <-' | | +// | Definition file data | | | +// | . | | | +// | . | | | +// | . | | | +// |------------------------------------------------| <---' | +// | File system partition image | | +// | . | | +// | . | | +// | . | | +// |------------------------------------------------| <-----' +// | Signed verification data | +// | . | +// | . | +// | . | +// `================================================' +// +package sif + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/google/uuid" +) + +// SIF header constants and quantities. +const ( + hdrLaunchLen = 32 // len("#!/usr/bin/env... ") + hdrMagicLen = 10 // len("SIF_MAGIC") + hdrVersionLen = 3 // len("99") +) + +var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'} + +// SpecVersion specifies a SIF specification version. +type SpecVersion uint8 + +func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) } + +// bytes returns the value of b, formatted for direct inclusion in a SIF header. +func (v SpecVersion) bytes() [hdrVersionLen]byte { + var b [3]byte + copy(b[:], fmt.Sprintf("%02d", v)) + return b +} + +// SIF specification versions. +const ( + version01 SpecVersion = iota + 1 +) + +// CurrentVersion specifies the current SIF specification version. +const CurrentVersion = version01 + +const ( + descrGroupMask = 0xf0000000 // groups start at that offset + descrEntityLen = 256 // len("Joe Bloe ...") + descrNameLen = 128 // descriptor name (string identifier) + descrMaxPrivLen = 384 // size reserved for descriptor specific data +) + +// DataType represents the different SIF data object types stored in the image. +type DataType int32 + +// List of supported SIF data types. +const ( + DataDeffile DataType = iota + 0x4001 // definition file data object + DataEnvVar // environment variables data object + DataLabels // JSON labels data object + DataPartition // file system data object + DataSignature // signing/verification data object + DataGenericJSON // generic JSON meta-data + DataGeneric // generic / raw data + DataCryptoMessage // cryptographic message data object +) + +// String returns a human-readable representation of t. 
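+//
+// Illustrative mapping, derived from the cases below:
+//
+//	fmt.Sprint(DataPartition) // "FS"
+//	fmt.Sprint(DataSignature) // "Signature"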
+func (t DataType) String() string { + switch t { + case DataDeffile: + return "Def.FILE" + case DataEnvVar: + return "Env.Vars" + case DataLabels: + return "JSON.Labels" + case DataPartition: + return "FS" + case DataSignature: + return "Signature" + case DataGenericJSON: + return "JSON.Generic" + case DataGeneric: + return "Generic/Raw" + case DataCryptoMessage: + return "Cryptographic Message" + } + return "Unknown" +} + +// FSType represents the different SIF file system types found in partition data objects. +type FSType int32 + +// List of supported file systems. +const ( + FsSquash FSType = iota + 1 // Squashfs file system, RDONLY + FsExt3 // EXT3 file system, RDWR (deprecated) + FsImmuObj // immutable data object archive + FsRaw // raw data + FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY +) + +// String returns a human-readable representation of t. +func (t FSType) String() string { + switch t { + case FsSquash: + return "Squashfs" + case FsExt3: + return "Ext3" + case FsImmuObj: + return "Archive" + case FsRaw: + return "Raw" + case FsEncryptedSquashfs: + return "Encrypted squashfs" + } + return "Unknown" +} + +// PartType represents the different SIF container partition types (system and data). +type PartType int32 + +// List of supported partition types. +const ( + PartSystem PartType = iota + 1 // partition hosts an operating system + PartPrimSys // partition hosts the primary operating system + PartData // partition hosts data only + PartOverlay // partition hosts an overlay +) + +// String returns a human-readable representation of t. +func (t PartType) String() string { + switch t { + case PartSystem: + return "System" + case PartPrimSys: + return "*System" + case PartData: + return "Data" + case PartOverlay: + return "Overlay" + } + return "Unknown" +} + +// hashType represents the different SIF hashing function types used to fingerprint data objects. +type hashType int32 + +// List of supported hash functions. +const ( + hashSHA256 hashType = iota + 1 + hashSHA384 + hashSHA512 + hashBLAKE2S + hashBLAKE2B +) + +// FormatType represents the different formats used to store cryptographic message objects. +type FormatType int32 + +// List of supported cryptographic message formats. +const ( + FormatOpenPGP FormatType = iota + 1 + FormatPEM +) + +// String returns a human-readable representation of t. +func (t FormatType) String() string { + switch t { + case FormatOpenPGP: + return "OpenPGP" + case FormatPEM: + return "PEM" + } + return "Unknown" +} + +// MessageType represents the different messages stored within cryptographic message objects. +type MessageType int32 + +// List of supported cryptographic message formats. +const ( + // openPGP formatted messages. + MessageClearSignature MessageType = 0x100 + + // PEM formatted messages. + MessageRSAOAEP MessageType = 0x200 +) + +// String returns a human-readable representation of t. +func (t MessageType) String() string { + switch t { + case MessageClearSignature: + return "Clear Signature" + case MessageRSAOAEP: + return "RSA-OAEP" + } + return "Unknown" +} + +// header describes a loaded SIF file. +type header struct { + LaunchScript [hdrLaunchLen]byte + + Magic [hdrMagicLen]byte + Version [hdrVersionLen]byte + Arch archType + ID uuid.UUID + + CreatedAt int64 + ModifiedAt int64 + + DescriptorsFree int64 + DescriptorsTotal int64 + DescriptorsOffset int64 + DescriptorsSize int64 + DataOffset int64 + DataSize int64 +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h. 
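+//
+// A minimal sketch of intended use, assuming hasher is a hash.Hash (callers
+// outside this package reach the same data via (*FileImage).GetHeaderIntegrityReader):
+//
+//	_, err := io.Copy(hasher, f.GetHeaderIntegrityReader())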
+func (h header) GetIntegrityReader() io.Reader { + return io.MultiReader( + bytes.NewReader(h.LaunchScript[:]), + bytes.NewReader(h.Magic[:]), + bytes.NewReader(h.Version[:]), + bytes.NewReader(h.ID[:]), + ) +} + +// ReadWriter describes the interface required to read and write SIF images. +type ReadWriter interface { + io.ReaderAt + io.WriteSeeker + Truncate(int64) error +} + +// FileImage describes the representation of a SIF file in memory. +type FileImage struct { + rw ReadWriter // Backing storage for image. + + h header // Raw global header from image. + rds []rawDescriptor // Raw descriptors from image. + + closeOnUnload bool // Close rw on Unload. + minIDs map[uint32]uint32 // Minimum object IDs for each group ID. +} + +// LaunchScript returns the image launch script. +func (f *FileImage) LaunchScript() string { + return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00")) +} + +// Version returns the SIF specification version of the image. +func (f *FileImage) Version() string { + return string(bytes.TrimRight(f.h.Version[:], "\x00")) +} + +// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU +// architecture cannot be determined. +func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() } + +// ID returns the ID of the image. +func (f *FileImage) ID() string { return f.h.ID.String() } + +// CreatedAt returns the creation time of the image. +func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) } + +// ModifiedAt returns the last modification time of the image. +func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) } + +// DescriptorsFree returns the number of free descriptors in the image. +func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree } + +// DescriptorsTotal returns the total number of descriptors in the image. +func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal } + +// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset } + +// DescriptorsSize returns the size (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize } + +// DataOffset returns the offset (in bytes) of the data section in the image. +func (f *FileImage) DataOffset() int64 { return f.h.DataOffset } + +// DataSize returns the size (in bytes) of the data section in the image. +func (f *FileImage) DataSize() int64 { return f.h.DataSize } + +// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the +// header of the image. +func (f *FileImage) GetHeaderIntegrityReader() io.Reader { + return f.h.GetIntegrityReader() +} diff --git a/vendor/github.com/vbauerster/mpb/v7/.travis.yml b/vendor/github.com/vbauerster/mpb/v7/.travis.yml deleted file mode 100644 index 9a203a67d2a..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le - -go: - - 1.14.x - -script: - - go test -race ./... 
- - for i in _examples/*/; do go build $i/*.go || exit 1; done diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md index 90d4fe639ca..413f9e1dbf0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/README.md +++ b/vendor/github.com/vbauerster/mpb/v7/README.md @@ -1,8 +1,7 @@ # Multi Progress Bar [![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v7) -[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb) -[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb) +[![Test status](https://github.com/vbauerster/mpb/actions/workflows/test.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/test.yml) [![Donate with PayPal](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/vbauerster) **mpb** is a Go lib for rendering progress bars in terminal applications. @@ -37,10 +36,10 @@ func main() { total := 100 name := "Single Bar:" - // adding a single bar, which will inherit container's width - bar := p.Add(int64(total), - // progress bar filler with customized style - mpb.NewBarFiller(mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟")), + // create a single bar, which will inherit container's width + bar := p.New(int64(total), + // BarFillerBuilder with custom style + mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), @@ -66,7 +65,7 @@ func main() { ```go var wg sync.WaitGroup - // passed &wg will be accounted at p.Wait() call + // passed wg will be accounted at p.Wait() call p := mpb.New(mpb.WithWaitGroup(&wg)) total, numBars := 100, 3 wg.Add(numBars) @@ -104,7 +103,7 @@ func main() { } }() } - // Waiting for passed &wg and for all bars to complete and flush + // wait for passed wg and for all bars to complete and flush p.Wait() ``` diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go index dabe1a47546..4991f4f15b4 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar.go @@ -5,9 +5,9 @@ import ( "context" "fmt" "io" - "log" "runtime/debug" "strings" + "sync" "time" "github.com/acarl005/stripansi" @@ -17,32 +17,21 @@ import ( // Bar represents a progress bar. type Bar struct { - priority int // used by heap - index int // used by heap - - toShutdown bool - toDrop bool - noPop bool - hasEwmaDecorators bool - operateState chan func(*bState) - frameCh chan *frame - - // cancel is called either by user or on complete event - cancel func() - // done is closed after cacheState is assigned - done chan struct{} - // cacheState is populated, right after close(b.done) - cacheState *bState - + index int // used by heap + priority int // used by heap + hasEwma bool + frameCh chan *renderFrame + operateState chan func(*bState) + done chan struct{} container *Progress - dlogger *log.Logger + bs *bState + cancel func() recoveredPanic interface{} } type extenderFunc func(in io.Reader, reqWidth int, st decor.Statistics) (out io.Reader, lines int) -// bState is actual bar state. It gets passed to *Bar.serve(...) monitor -// goroutine. +// bState is actual bar's state. 
type bState struct { id int priority int @@ -50,11 +39,10 @@ type bState struct { total int64 current int64 refill int64 - lastN int64 - iterated bool + lastIncrement int64 trimSpace bool completed bool - completeFlushed bool + aborted bool triggerComplete bool dropOnComplete bool noPop bool @@ -63,36 +51,33 @@ type bState struct { averageDecorators []decor.AverageDecorator ewmaDecorators []decor.EwmaDecorator shutdownListeners []decor.ShutdownListener - bufP, bufB, bufA *bytes.Buffer + buffers [3]*bytes.Buffer filler BarFiller middleware func(BarFiller) BarFiller extender extenderFunc + debugOut io.Writer - // runningBar is a key for *pState.parkedBars - runningBar *Bar - - debugOut io.Writer + afterBar *Bar // key for (*pState).queueBars + sync bool } -type frame struct { - reader io.Reader - lines int +type renderFrame struct { + reader io.Reader + lines int + shutdown bool } func newBar(container *Progress, bs *bState) *Bar { - logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id) ctx, cancel := context.WithCancel(container.ctx) bar := &Bar{ - container: container, priority: bs.priority, - toDrop: bs.dropOnComplete, - noPop: bs.noPop, + hasEwma: len(bs.ewmaDecorators) != 0, + frameCh: make(chan *renderFrame, 1), operateState: make(chan func(*bState)), - frameCh: make(chan *frame, 1), done: make(chan struct{}), + container: container, cancel: cancel, - dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile), } go bar.serve(ctx, bs) @@ -100,12 +85,20 @@ func newBar(container *Progress, bs *bState) *Bar { } // ProxyReader wraps r with metrics required for progress tracking. -// Panics if r is nil. +// If r is 'unknown total/size' reader it's mandatory to call +// (*Bar).SetTotal(-1, true) method after (Reader).Read returns io.EOF. +// Panics if r is nil. If bar is already completed or aborted, returns +// nil. func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } - return newProxyReader(r, b) + select { + case <-b.done: + return nil + default: + return b.newProxyReader(r) + } } // ID returs id of the bar. @@ -115,18 +108,18 @@ func (b *Bar) ID() int { case b.operateState <- func(s *bState) { result <- s.id }: return <-result case <-b.done: - return b.cacheState.id + return b.bs.id } } -// Current returns bar's current number, in other words sum of all increments. +// Current returns bar's current value, in other words sum of all increments. func (b *Bar) Current() int64 { result := make(chan int64) select { case b.operateState <- func(s *bState) { result <- s.current }: return <-result case <-b.done: - return b.cacheState.current + return b.bs.current } } @@ -145,7 +138,7 @@ func (b *Bar) SetRefill(amount int64) { // TraverseDecorators traverses all available decorators and calls cb func on each. func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { - done := make(chan struct{}) + sync := make(chan struct{}) select { case b.operateState <- func(s *bState) { for _, decorators := range [...][]decor.Decorator{ @@ -156,28 +149,56 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { cb(extractBaseDecorator(d)) } } - close(done) + close(sync) + }: + <-sync + case <-b.done: + } +} + +// EnableTriggerComplete enables triggering complete event. It's +// effective only for bar which was constructed with `total <= 0` and +// after total has been set with (*Bar).SetTotal(int64, false). If bar +// has been incremented to the total, complete event is triggered right +// away. 
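+//
+// Sketch of the intended flow for an initially unknown total (illustrative;
+// p is assumed to be a *Progress and n the total learned at runtime):
+//
+//	bar := p.AddBar(0)          // constructed with unknown total
+//	bar.SetTotal(n, false)      // set the real total once known
+//	bar.EnableTriggerComplete() // complete once current reaches n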
+func (b *Bar) EnableTriggerComplete() { + select { + case b.operateState <- func(s *bState) { + if s.triggerComplete || s.total <= 0 { + return + } + if s.current >= s.total { + s.current = s.total + s.completed = true + go b.forceRefresh() + } else { + s.triggerComplete = true + } }: - <-done case <-b.done: } } -// SetTotal sets total dynamically. -// If total is less than or equal to zero it takes progress' current value. -func (b *Bar) SetTotal(total int64, triggerComplete bool) { +// SetTotal sets total to an arbitrary value. It's effective only for +// bar which was constructed with `total <= 0`. Setting total to negative +// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool). +// If triggerCompleteNow is true, total value is set to current and +// complete event is triggered right away. +func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) { select { case b.operateState <- func(s *bState) { - s.triggerComplete = triggerComplete - if total <= 0 { + if s.triggerComplete { + return + } + if total < 0 { s.total = s.current } else { s.total = total } - if s.triggerComplete && !s.completed { + if triggerCompleteNow { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -189,13 +210,12 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) { func (b *Bar) SetCurrent(current int64) { select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = current - s.current + s.lastIncrement = current - s.current s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -214,15 +234,17 @@ func (b *Bar) IncrBy(n int) { // IncrInt64 increments progress by amount of n. func (b *Bar) IncrInt64(n int64) { + if n <= 0 { + return + } select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = n + s.lastIncrement = n s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefreshIfLastUncompleted() + go b.forceRefresh() } }: case <-b.done: @@ -236,10 +258,18 @@ func (b *Bar) IncrInt64(n int64) { func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { select { case b.operateState <- func(s *bState) { - ewmaIterationUpdate(false, s, dur) + if s.lastIncrement > 0 { + s.decoratorEwmaUpdate(dur) + s.lastIncrement = 0 + } else { + panic("increment required before ewma iteration update") + } }: case <-b.done: - ewmaIterationUpdate(true, b.cacheState, dur) + if b.bs.lastIncrement > 0 { + b.bs.decoratorEwmaUpdate(dur) + b.bs.lastIncrement = 0 + } } } @@ -249,9 +279,7 @@ func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { func (b *Bar) DecoratorAverageAdjust(start time.Time) { select { case b.operateState <- func(s *bState) { - for _, d := range s.averageDecorators { - d.AverageAdjust(start) - } + s.decoratorAverageAdjust(start) }: case <-b.done: } @@ -266,43 +294,33 @@ func (b *Bar) SetPriority(priority int) { // Abort interrupts bar's running goroutine. Abort won't be engaged // if bar is already in complete state. If drop is true bar will be -// removed as well. +// removed as well. To make sure that bar has been removed call +// (*Bar).Wait method. 
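+//
+// For example (illustrative):
+//
+//	bar.Abort(true) // request removal
+//	bar.Wait()      // block until the bar has been removed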
@@ -266,43 +294,33 @@ func (b *Bar) SetPriority(priority int) {
 
 // Abort interrupts bar's running goroutine. Abort won't be engaged
 // if bar is already in complete state. If drop is true bar will be
-// removed as well.
+// removed as well. To make sure that bar has been removed call
+// (*Bar).Wait method.
 func (b *Bar) Abort(drop bool) {
-	done := make(chan struct{})
 	select {
 	case b.operateState <- func(s *bState) {
-		if s.completed == true {
-			close(done)
+		if s.completed || s.aborted {
 			return
 		}
-		// container must be run during lifetime of this inner goroutine
-		// we control this by done channel declared above
-		go func() {
-			if drop {
-				b.container.dropBar(b)
-			} else {
-				var uncompleted int
-				b.container.traverseBars(func(bar *Bar) bool {
-					if b != bar && !bar.Completed() {
-						uncompleted++
-						return false
-					}
-					return true
-				})
-				if uncompleted == 0 {
-					b.container.refreshCh <- time.Now()
-				}
-			}
-			close(done) // release hold of Abort
-		}()
-		b.cancel()
+		s.aborted = true
+		s.dropOnComplete = drop
+		go b.forceRefresh()
 	}:
-		// guarantee: container is alive during lifetime of this hold
-		<-done
 	case <-b.done:
 	}
 }
 
+// Aborted reports whether the bar is in aborted state.
+func (b *Bar) Aborted() bool {
+	result := make(chan bool)
+	select {
+	case b.operateState <- func(s *bState) { result <- s.aborted }:
+		return <-result
+	case <-b.done:
+		return b.bs.aborted
+	}
+}
+
 // Completed reports whether the bar is in completed state.
 func (b *Bar) Completed() bool {
 	result := make(chan bool)
@@ -310,22 +328,28 @@ func (b *Bar) Completed() bool {
 	case b.operateState <- func(s *bState) { result <- s.completed }:
 		return <-result
 	case <-b.done:
-		return true
+		return b.bs.completed
 	}
 }
 
-func (b *Bar) serve(ctx context.Context, s *bState) {
+// Wait blocks until bar is completed or aborted.
+func (b *Bar) Wait() {
+	<-b.done
+}
+
+func (b *Bar) serve(ctx context.Context, bs *bState) {
 	defer b.container.bwg.Done()
+	if bs.afterBar != nil && bs.sync {
+		bs.afterBar.Wait()
+	}
 	for {
 		select {
 		case op := <-b.operateState:
-			op(s)
+			op(bs)
 		case <-ctx.Done():
-			// Notifying decorators about shutdown event
-			for _, sl := range s.shutdownListeners {
-				sl.Shutdown()
-			}
-			b.cacheState = s
+			bs.aborted = !bs.completed
+			bs.decoratorShutdownNotify()
+			b.bs = bs
 			close(b.done)
 			return
 		}
@@ -335,76 +359,62 @@ func (b *Bar) serve(ctx context.Context, s *bState) {
 func (b *Bar) render(tw int) {
 	select {
 	case b.operateState <- func(s *bState) {
+		var reader io.Reader
+		var lines int
 		stat := newStatistics(tw, s)
 		defer func() {
 			// recovering if user defined decorator panics for example
 			if p := recover(); p != nil {
-				if b.recoveredPanic == nil {
-					s.extender = makePanicExtender(p)
-					b.toShutdown = !b.toShutdown
-					b.recoveredPanic = p
+				if s.debugOut != nil {
+					fmt.Fprintln(s.debugOut, p)
+					_, _ = s.debugOut.Write(debug.Stack())
 				}
-				reader, lines := s.extender(nil, s.reqWidth, stat)
-				b.frameCh <- &frame{reader, lines + 1}
-				b.dlogger.Println(p)
+				s.aborted = !s.completed
+				s.extender = makePanicExtender(p)
+				reader, lines = s.extender(nil, s.reqWidth, stat)
+				b.recoveredPanic = p
 			}
-			s.completeFlushed = s.completed
+			frame := renderFrame{
+				reader:   reader,
+				lines:    lines + 1,
+				shutdown: s.completed || s.aborted,
+			}
+			if frame.shutdown {
+				b.cancel()
+			}
+			b.frameCh <- &frame
 		}()
-		reader, lines := s.extender(s.draw(stat), s.reqWidth, stat)
-		b.toShutdown = s.completed && !s.completeFlushed
-		b.frameCh <- &frame{reader, lines + 1}
+		if b.recoveredPanic == nil {
+			reader = s.draw(stat)
+		}
+		reader, lines = s.extender(reader, s.reqWidth, stat)
 	}:
 	case <-b.done:
-		s := b.cacheState
-		stat := newStatistics(tw, s)
-		var r io.Reader
+		var reader io.Reader
+		var lines int
+		stat, s := newStatistics(tw, b.bs), b.bs
 		if b.recoveredPanic == nil {
-			r = s.draw(stat)
-		}
-		reader, lines := s.extender(r, s.reqWidth, stat)
-		b.frameCh <- &frame{reader, lines + 1}
-	}
-}
-
-func (b *Bar) subscribeDecorators() {
-	var averageDecorators []decor.AverageDecorator
-	var ewmaDecorators []decor.EwmaDecorator
-	var shutdownListeners []decor.ShutdownListener
-	b.TraverseDecorators(func(d decor.Decorator) {
-		if d, ok := d.(decor.AverageDecorator); ok {
-			averageDecorators = append(averageDecorators, d)
+			reader = s.draw(stat)
 		}
-		if d, ok := d.(decor.EwmaDecorator); ok {
-			ewmaDecorators = append(ewmaDecorators, d)
+		reader, lines = s.extender(reader, s.reqWidth, stat)
+		b.frameCh <- &renderFrame{
+			reader: reader,
+			lines:  lines + 1,
 		}
-		if d, ok := d.(decor.ShutdownListener); ok {
-			shutdownListeners = append(shutdownListeners, d)
-		}
-	})
-	b.hasEwmaDecorators = len(ewmaDecorators) != 0
-	select {
-	case b.operateState <- func(s *bState) {
-		s.averageDecorators = averageDecorators
-		s.ewmaDecorators = ewmaDecorators
-		s.shutdownListeners = shutdownListeners
-	}:
-	case <-b.done:
 	}
 }
 
-func (b *Bar) forceRefreshIfLastUncompleted() {
-	var uncompleted int
+func (b *Bar) forceRefresh() {
+	var anyOtherRunning bool
 	b.container.traverseBars(func(bar *Bar) bool {
-		if b != bar && !bar.Completed() {
-			uncompleted++
-			return false
-		}
-		return true
+		anyOtherRunning = b != bar && bar.isRunning()
+		return !anyOtherRunning
 	})
-	if uncompleted == 0 {
+	if !anyOtherRunning {
 		for {
 			select {
 			case b.container.refreshCh <- time.Now():
+				time.Sleep(prr)
 			case <-b.done:
 				return
 			}
@@ -412,51 +422,64 @@ func (b *Bar) forceRefreshIfLastUncompleted() {
 		}
 	}
 }
 
+func (b *Bar) isRunning() bool {
+	result := make(chan bool)
+	select {
+	case b.operateState <- func(s *bState) {
+		result <- !s.completed && !s.aborted
+	}:
+		return <-result
+	case <-b.done:
+		return false
+	}
+}
+
 func (b *Bar) wSyncTable() [][]chan int {
 	result := make(chan [][]chan int)
 	select {
 	case b.operateState <- func(s *bState) { result <- s.wSyncTable() }:
 		return <-result
 	case <-b.done:
-		return b.cacheState.wSyncTable()
+		return b.bs.wSyncTable()
 	}
 }
 
 func (s *bState) draw(stat decor.Statistics) io.Reader {
+	bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2]
 	nlr := strings.NewReader("\n")
 	tw := stat.AvailableWidth
 	for _, d := range s.pDecorators {
 		str := d.Decor(stat)
 		stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
-		s.bufP.WriteString(str)
+		bufP.WriteString(str)
 	}
 	if stat.AvailableWidth < 1 {
-		trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…"))
-		s.bufP.Reset()
+		trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…"))
+		bufP.Reset()
 		return io.MultiReader(trunc, nlr)
 	}
 
 	if !s.trimSpace && stat.AvailableWidth > 1 {
 		stat.AvailableWidth -= 2
-		s.bufB.WriteByte(' ')
-		defer s.bufB.WriteByte(' ')
+		bufB.WriteByte(' ')
+		defer bufB.WriteByte(' ')
 	}
 
 	tw = stat.AvailableWidth
 	for _, d := range s.aDecorators {
 		str := d.Decor(stat)
 		stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
-		s.bufA.WriteString(str)
+		bufA.WriteString(str)
 	}
 	if stat.AvailableWidth < 1 {
-		trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…"))
-		s.bufA.Reset()
-		return io.MultiReader(s.bufP, s.bufB, trunc, nlr)
+		trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…"))
+		bufA.Reset()
+		return io.MultiReader(bufP, bufB, trunc, nlr)
 	}
 
-	s.filler.Fill(s.bufB, s.reqWidth, stat)
+	s.filler.Fill(bufB, s.reqWidth, stat)
 
-	return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr)
+	return io.MultiReader(bufP, bufB, bufA, nlr)
 }
 
 func (s *bState) wSyncTable() [][]chan int {
@@ -481,14 +504,86 @@ func (s *bState) wSyncTable() [][]chan int {
 	return table
 }
 
+func (s *bState) subscribeDecorators() {
+	for _, decorators := range [...][]decor.Decorator{
+		s.pDecorators,
+		s.aDecorators,
+	} {
+		for _, d := range decorators {
+			d = extractBaseDecorator(d)
+			if d, ok := d.(decor.AverageDecorator); ok {
+				s.averageDecorators = append(s.averageDecorators, d)
+			}
+			if d, ok := d.(decor.EwmaDecorator); ok {
+				s.ewmaDecorators = append(s.ewmaDecorators, d)
+			}
+			if d, ok := d.(decor.ShutdownListener); ok {
+				s.shutdownListeners = append(s.shutdownListeners, d)
+			}
+		}
+	}
+}
+
+func (s bState) decoratorEwmaUpdate(dur time.Duration) {
+	wg := new(sync.WaitGroup)
+	for i := 0; i < len(s.ewmaDecorators); i++ {
+		switch d := s.ewmaDecorators[i]; i {
+		case len(s.ewmaDecorators) - 1:
+			d.EwmaUpdate(s.lastIncrement, dur)
+		default:
+			wg.Add(1)
+			go func() {
+				d.EwmaUpdate(s.lastIncrement, dur)
+				wg.Done()
+			}()
+		}
+	}
+	wg.Wait()
+}
+
+func (s bState) decoratorAverageAdjust(start time.Time) {
+	wg := new(sync.WaitGroup)
+	for i := 0; i < len(s.averageDecorators); i++ {
+		switch d := s.averageDecorators[i]; i {
+		case len(s.averageDecorators) - 1:
+			d.AverageAdjust(start)
+		default:
+			wg.Add(1)
+			go func() {
+				d.AverageAdjust(start)
+				wg.Done()
+			}()
+		}
+	}
+	wg.Wait()
+}
+
+func (s bState) decoratorShutdownNotify() {
+	wg := new(sync.WaitGroup)
+	for i := 0; i < len(s.shutdownListeners); i++ {
+		switch d := s.shutdownListeners[i]; i {
+		case len(s.shutdownListeners) - 1:
+			d.Shutdown()
+		default:
+			wg.Add(1)
+			go func() {
+				d.Shutdown()
+				wg.Done()
+			}()
+		}
+	}
+	wg.Wait()
+}
+
 func newStatistics(tw int, s *bState) decor.Statistics {
 	return decor.Statistics{
-		ID:             s.id,
 		AvailableWidth: tw,
+		ID:             s.id,
 		Total:          s.total,
 		Current:        s.current,
 		Refill:         s.refill,
-		Completed:      s.completeFlushed,
+		Completed:      s.completed,
+		Aborted:        s.aborted,
 	}
 }
 
@@ -499,27 +594,13 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator {
 	return d
 }
 
-func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) {
-	if !done && !s.iterated {
-		panic("increment required before ewma iteration update")
-	} else {
-		s.iterated = false
-	}
-	for _, d := range s.ewmaDecorators {
-		d.EwmaUpdate(s.lastN, dur)
-	}
-}
-
 func makePanicExtender(p interface{}) extenderFunc {
 	pstr := fmt.Sprint(p)
-	stack := debug.Stack()
-	stackLines := bytes.Count(stack, []byte("\n"))
 	return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) {
 		mr := io.MultiReader(
 			strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")),
-			strings.NewReader(fmt.Sprintf("\n%#v\n", st)),
-			bytes.NewReader(stack),
+			strings.NewReader("\n"),
 		)
-		return mr, stackLines + 1
+		return mr, 0
 	}
 }
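Abort no longer blocks while the container catches up; it just flags the state and forces a refresh, with the new (*Bar).Wait as the synchronization point. A hedged sketch of the abort flow (the sleep merely simulates a cancellation arriving mid-run):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/vbauerster/mpb/v7"
    )

    func main() {
    	p := mpb.New()
    	bar := p.AddBar(100)

    	go func() {
    		time.Sleep(50 * time.Millisecond)
    		bar.Abort(false) // false keeps the bar on screen
    	}()

    	bar.Wait() // blocks until the bar is completed or aborted
    	if bar.Aborted() {
    		// safe after Wait: the final state is cached in b.bs
    		fmt.Println("bar was aborted")
    	}
    	p.Wait()
    }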
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go
index a69087c474e..81177fc2eea 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go
@@ -9,31 +9,42 @@ import (
 // BarFiller interface.
 // Bar (without decorators) renders itself by calling BarFiller's Fill method.
 //
-// reqWidth is requested width, set by `func WithWidth(int) ContainerOption`.
+// reqWidth is requested width set by `func WithWidth(int) ContainerOption`.
 // If not set, it defaults to terminal width.
 //
-// Default implementations can be obtained via:
-//
-//	func NewBarFiller(BarStyle()) BarFiller
-//	func NewBarFiller(SpinnerStyle()) BarFiller
-//
 type BarFiller interface {
 	Fill(w io.Writer, reqWidth int, stat decor.Statistics)
 }
 
 // BarFillerBuilder interface.
+// Default implementations are:
+//
+//	BarStyle()
+//	SpinnerStyle()
+//	NopStyle()
+//
 type BarFillerBuilder interface {
 	Build() BarFiller
 }
 
-// BarFillerFunc is function type adapter to convert function into BarFiller.
+// BarFillerFunc is function type adapter to convert compatible function
+// into BarFiller interface.
 type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics)
 
 func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) {
 	f(w, reqWidth, stat)
 }
 
+// BarFillerBuilderFunc is function type adapter to convert compatible
+// function into BarFillerBuilder interface.
+type BarFillerBuilderFunc func() BarFiller
+
+func (f BarFillerBuilderFunc) Build() BarFiller {
+	return f()
+}
+
 // NewBarFiller constructs a BarFiller from provided BarFillerBuilder.
+// Deprecated. Prefer using `*Progress.New(...)` directly.
 func NewBarFiller(b BarFillerBuilder) BarFiller {
 	return b.Build()
 }
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
index 80b2104555d..d8bf90a4a94 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
@@ -32,13 +32,13 @@ type BarStyleComposer interface {
 }
 
 type bFiller struct {
+	rev        bool
 	components [components]*component
 	tip        struct {
 		count      uint
 		onComplete *component
 		frames     []*component
 	}
-	flush func(dst io.Writer, filling, padding [][]byte)
 }
 
 type component struct {
@@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer {
 }
 
 func (s *barStyle) Build() BarFiller {
-	bf := new(bFiller)
-	if s.rev {
-		bf.flush = func(dst io.Writer, filling, padding [][]byte) {
-			flush(dst, padding, filling)
-		}
-	} else {
-		bf.flush = flush
-	}
+	bf := &bFiller{rev: s.rev}
 	bf.components[iLbound] = &component{
 		width: runewidth.StringWidth(stripansi.Strip(s.lbound)),
 		bytes: []byte(s.lbound),
@@ -164,8 +157,8 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
 		return
 	}
 
-	w.Write(s.components[iLbound].bytes)
-	defer w.Write(s.components[iRbound].bytes)
+	mustWrite(w, s.components[iLbound].bytes)
+	defer mustWrite(w, s.components[iRbound].bytes)
 
 	if width == 0 {
 		return
@@ -236,14 +229,25 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
 		}
 	}
 
-	s.flush(w, filling, padding)
+	if s.rev {
+		flush(w, padding, filling)
+	} else {
+		flush(w, filling, padding)
+	}
 }
 
-func flush(dst io.Writer, filling, padding [][]byte) {
+func flush(w io.Writer, filling, padding [][]byte) {
 	for i := len(filling) - 1; i >= 0; i-- {
-		dst.Write(filling[i])
+		mustWrite(w, filling[i])
 	}
 	for i := 0; i < len(padding); i++ {
-		dst.Write(padding[i])
+		mustWrite(w, padding[i])
+	}
+}
+
+func mustWrite(w io.Writer, p []byte) {
+	_, err := w.Write(p)
+	if err != nil {
+		panic(err)
 	}
 }
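The new BarFillerBuilderFunc adapter makes one-off custom fillers cheap to wire up. A hedged sketch of a trivial custom filler; the dot-per-decile rendering is invented for illustration:

    package main

    import (
    	"io"

    	"github.com/vbauerster/mpb/v7"
    	"github.com/vbauerster/mpb/v7/decor"
    )

    func main() {
    	p := mpb.New()
    	// build a filler from plain functions, no named type needed
    	builder := mpb.BarFillerBuilderFunc(func() mpb.BarFiller {
    		return mpb.BarFillerFunc(func(w io.Writer, reqWidth int, stat decor.Statistics) {
    			// hypothetical rendering: one dot per 10% of progress
    			if stat.Total > 0 {
    				for i := int64(0); i < 10*stat.Current/stat.Total; i++ {
    					io.WriteString(w, ".")
    				}
    			}
    		})
    	})
    	bar := p.New(100, builder)
    	for i := 0; i < 100; i++ {
    		bar.Increment()
    	}
    	p.Wait()
    }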
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go
new file mode 100644
index 00000000000..1a7086fecb6
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go
@@ -0,0 +1,14 @@
+package mpb
+
+import (
+	"io"
+
+	"github.com/vbauerster/mpb/v7/decor"
+)
+
+// NopStyle provides BarFillerBuilder which builds NOP BarFiller.
+func NopStyle() BarFillerBuilder {
+	return BarFillerBuilderFunc(func() BarFiller {
+		return BarFillerFunc(func(io.Writer, int, decor.Statistics) {})
+	})
+}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
index 58ae1c53283..d38525efc94 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
@@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
 		return
 	}
 
+	var err error
 	rest := width - frameWidth
 	switch s.position {
 	case positionLeft:
-		io.WriteString(w, frame+strings.Repeat(" ", rest))
+		_, err = io.WriteString(w, frame+strings.Repeat(" ", rest))
 	case positionRight:
-		io.WriteString(w, strings.Repeat(" ", rest)+frame)
+		_, err = io.WriteString(w, strings.Repeat(" ", rest)+frame)
 	default:
 		str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2)
-		io.WriteString(w, str)
+		_, err = io.WriteString(w, str)
+	}
+	if err != nil {
+		panic(err)
 	}
 	s.count++
 }
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v7/bar_option.go
index 46b7de0bf56..8599f0a57b9 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_option.go
@@ -5,12 +5,20 @@ import (
 	"io"
 
 	"github.com/vbauerster/mpb/v7/decor"
-	"github.com/vbauerster/mpb/v7/internal"
 )
 
 // BarOption is a func option to alter default behavior of a bar.
 type BarOption func(*bState)
 
+func skipNil(decorators []decor.Decorator) (filtered []decor.Decorator) {
+	for _, d := range decorators {
+		if d != nil {
+			filtered = append(filtered, d)
+		}
+	}
+	return
+}
+
 func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) {
 	type mergeWrapper interface {
 		MergeUnwrap() []decor.Decorator
@@ -26,14 +34,14 @@ func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Deco
 // AppendDecorators let you inject decorators to the bar's right side.
 func AppendDecorators(decorators ...decor.Decorator) BarOption {
 	return func(s *bState) {
-		s.addDecorators(&s.aDecorators, decorators...)
+		s.addDecorators(&s.aDecorators, skipNil(decorators)...)
 	}
 }
 
 // PrependDecorators let you inject decorators to the bar's left side.
func PrependDecorators(decorators ...decor.Decorator) BarOption {
 	return func(s *bState) {
-		s.addDecorators(&s.pDecorators, decorators...)
+		s.addDecorators(&s.pDecorators, skipNil(decorators)...)
 	}
 }
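AppendDecorators and PrependDecorators now silently drop nil entries, which pairs with the nil-returning conditional helpers added to the decor package later in this patch. A hedged sketch; the verbose flag is an invented configuration knob:

    package main

    import (
    	"github.com/vbauerster/mpb/v7"
    	"github.com/vbauerster/mpb/v7/decor"
    )

    func main() {
    	verbose := false // hypothetical flag
    	p := mpb.New()
    	bar := p.AddBar(100,
    		mpb.PrependDecorators(
    			decor.Name("copying"),
    			// OnCondition returns nil when verbose is false;
    			// PrependDecorators filters the nil out via skipNil
    			decor.OnCondition(decor.CountersNoUnit("%d / %d"), verbose),
    		),
    		mpb.AppendDecorators(decor.Percentage()),
    	)
    	for i := 0; i < 100; i++ {
    		bar.Increment()
    	}
    	p.Wait()
    }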
@@ -51,14 +59,17 @@
 	}
 }
 
-// BarQueueAfter queues this (being constructed) bar to relplace
-// runningBar after it has been completed.
-func BarQueueAfter(runningBar *Bar) BarOption {
-	if runningBar == nil {
+// BarQueueAfter puts this (being constructed) bar into the queue.
+// When the argument bar completes or aborts, the queued bar takes its place.
+// If sync is true, the queued bar is suspended until the argument bar
+// completes or aborts.
+func BarQueueAfter(bar *Bar, sync bool) BarOption {
+	if bar == nil {
 		return nil
 	}
 	return func(s *bState) {
-		s.runningBar = runningBar
+		s.afterBar = bar
+		s.sync = sync
 	}
 }
 
@@ -81,7 +92,10 @@ func BarFillerOnComplete(message string) BarOption {
 	return BarFillerMiddleware(func(base BarFiller) BarFiller {
 		return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
 			if st.Completed {
-				io.WriteString(w, message)
+				_, err := io.WriteString(w, message)
+				if err != nil {
+					panic(err)
+				}
 			} else {
 				base.Fill(w, reqWidth, st)
 			}
@@ -138,9 +152,12 @@ func BarNoPop() BarOption {
 	}
 }
 
-// BarOptional will invoke provided option only when pick is true.
-func BarOptional(option BarOption, pick bool) BarOption {
-	return BarOptOn(option, internal.Predicate(pick))
+// BarOptional will invoke provided option only when cond is true.
+func BarOptional(option BarOption, cond bool) BarOption {
+	if cond {
+		return option
+	}
+	return nil
 }
 
 // BarOptOn will invoke provided option only when higher order predicate
diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go
index a858c3c51dd..e523a175938 100644
--- a/vendor/github.com/vbauerster/mpb/v7/container_option.go
+++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go
@@ -5,8 +5,6 @@ import (
 	"io/ioutil"
 	"sync"
 	"time"
-
-	"github.com/vbauerster/mpb/v7/internal"
 )
 
 // ContainerOption is a func option to alter default behavior of a bar
@@ -101,9 +99,12 @@ func PopCompletedMode() ContainerOption {
 	}
 }
 
-// ContainerOptional will invoke provided option only when pick is true.
-func ContainerOptional(option ContainerOption, pick bool) ContainerOption {
-	return ContainerOptOn(option, internal.Predicate(pick))
+// ContainerOptional will invoke provided option only when cond is true.
+func ContainerOptional(option ContainerOption, cond bool) ContainerOption {
+	if cond {
+		return option
+	}
+	return nil
 }
 
 // ContainerOptOn will invoke provided option only when higher order
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
index 925c8b1dcf1..fac15b3bce9 100644
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
@@ -11,7 +11,7 @@ import (
 // ErrNotTTY not a TeleTYpewriter error.
 var ErrNotTTY = errors.New("not a terminal")
 
-// http://ascii-table.com/ansi-escape-sequences.php
+// https://github.com/dylanaraps/pure-sh-bible#cursor-movement
 const (
 	escOpen  = "\x1b["
 	cuuAndEd = "A\x1b[J"
@@ -76,9 +76,9 @@ func (w *Writer) GetWidth() (int, error) {
 	return tw, err
 }
 
-func (w *Writer) ansiCuuAndEd() (err error) {
+func (w *Writer) ansiCuuAndEd() error {
 	buf := make([]byte, 8)
 	buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10)
-	_, err = w.out.Write(append(buf, cuuAndEd...))
-	return
+	_, err := w.out.Write(append(buf, cuuAndEd...))
+	return err
 }
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go
index e81fae367ee..aad7709c062 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go
@@ -47,12 +47,13 @@ const (
 // Statistics consists of progress related statistics, that Decorator
 // may need.
 type Statistics struct {
-	ID             int
 	AvailableWidth int
+	ID             int
 	Total          int64
 	Current        int64
 	Refill         int64
 	Completed      bool
+	Aborted        bool
 }
 
 // Decorator interface.
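BarOptional and ContainerOptional now return the option (or nil) directly instead of routing through the removed internal Predicate helper. A hedged usage sketch; the quiet flag is invented:

    package main

    import "github.com/vbauerster/mpb/v7"

    func main() {
    	quiet := true // hypothetical flag
    	p := mpb.New(
    		// only pop completed bars when running interactively
    		mpb.ContainerOptional(mpb.PopCompletedMode(), !quiet),
    	)
    	bar := p.AddBar(10,
    		// remove the bar on completion only in quiet mode
    		mpb.BarOptional(mpb.BarRemoveOnComplete(), quiet),
    	)
    	for i := 0; i < 10; i++ {
    		bar.Increment()
    	}
    	p.Wait()
    }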
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go
index e41406a64da..84767115515 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go
@@ -17,6 +17,9 @@ import (
 //	+----+--------+---------+--------+
 //
 func Merge(decorator Decorator, placeholders ...WC) Decorator {
+	if decorator == nil {
+		return nil
+	}
 	if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 {
 		return decorator
 	}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go
new file mode 100644
index 00000000000..10ff67009cc
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go
@@ -0,0 +1,41 @@
+package decor
+
+// OnAbort returns decorator, which wraps provided decorator with sole
+// purpose to display provided message on abort event. It has no effect
+// if bar.Abort(drop bool) is called with true argument.
+//
+//	`decorator` Decorator to wrap
+//
+//	`message` message to display on abort event
+//
+func OnAbort(decorator Decorator, message string) Decorator {
+	if decorator == nil {
+		return nil
+	}
+	d := &onAbortWrapper{
+		Decorator: decorator,
+		msg:       message,
+	}
+	if md, ok := decorator.(*mergeDecorator); ok {
+		d.Decorator, md.Decorator = md.Decorator, d
+		return md
+	}
+	return d
+}
+
+type onAbortWrapper struct {
+	Decorator
+	msg string
+}
+
+func (d *onAbortWrapper) Decor(s Statistics) string {
+	if s.Aborted {
+		wc := d.GetConf()
+		return wc.FormatMsg(d.msg)
+	}
+	return d.Decorator.Decor(s)
+}
+
+func (d *onAbortWrapper) Base() Decorator {
+	return d.Decorator
+}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go
index f46b19abaa9..2ada2b31b16 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go
@@ -1,6 +1,6 @@
 package decor
 
-// OnComplete returns decorator, which wraps provided decorator, with
+// OnComplete returns decorator, which wraps provided decorator with
 // sole purpose to display provided message on complete event.
 //
 //	`decorator` Decorator to wrap
@@ -8,6 +8,9 @@ package decor
 //	`message` message to display on complete event
 //
 func OnComplete(decorator Decorator, message string) Decorator {
+	if decorator == nil {
+		return nil
+	}
 	d := &onCompleteWrapper{
 		Decorator: decorator,
 		msg:       message,
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go
new file mode 100644
index 00000000000..a9db0653aee
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go
@@ -0,0 +1,27 @@
+package decor
+
+// OnPredicate returns decorator if predicate evaluates to true.
+//
+//	`decorator` Decorator
+//
+//	`predicate` func() bool
+//
+func OnPredicate(decorator Decorator, predicate func() bool) Decorator {
+	if predicate() {
+		return decorator
+	}
+	return nil
+}
+
+// OnCondition returns decorator if condition is true.
+//
+//	`decorator` Decorator
+//
+//	`cond` bool
+//
+func OnCondition(decorator Decorator, cond bool) Decorator {
+	if cond {
+		return decorator
+	}
+	return nil
+}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
new file mode 100644
index 00000000000..c6a34384e63
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
@@ -0,0 +1,10 @@
+package decor
+
+import "io"
+
+func mustWriteString(w io.Writer, s string) {
+	_, err := io.WriteString(w, s)
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
index 2b0a7a9562c..e72668993ee 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
@@ -2,7 +2,6 @@ package decor
 
 import (
 	"fmt"
-	"io"
 	"strconv"
 
 	"github.com/vbauerster/mpb/v7/internal"
@@ -24,12 +23,11 @@ func (s percentageType) Format(st fmt.State, verb rune) {
 		}
 	}
 
-	io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
-
+	mustWriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
 	if st.Flag(' ') {
-		io.WriteString(st, " ")
+		mustWriteString(st, " ")
 	}
-	io.WriteString(st, "%")
+	mustWriteString(st, "%")
 }
 
 // Percentage returns percentage decorator. It's a wrapper of NewPercentage.
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
index e4b9740584b..09ecc23f807 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
@@ -2,8 +2,6 @@ package decor
 
 import (
 	"fmt"
-	"io"
-	"math"
 	"strconv"
 )
 
@@ -47,16 +45,15 @@ func (self SizeB1024) Format(st fmt.State, verb rune) {
 		unit = _iMiB
 	case self < _iTiB:
 		unit = _iGiB
-	case self <= math.MaxInt64:
+	default:
 		unit = _iTiB
 	}
 
-	io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
-
+	mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
 	if st.Flag(' ') {
-		io.WriteString(st, " ")
+		mustWriteString(st, " ")
 	}
-	io.WriteString(st, unit.String())
+	mustWriteString(st, unit.String())
 }
 
 const (
@@ -96,14 +93,13 @@ func (self SizeB1000) Format(st fmt.State, verb rune) {
 		unit = _MB
 	case self < _TB:
 		unit = _GB
-	case self <= math.MaxInt64:
+	default:
 		unit = _TB
 	}
 
-	io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
-
+	mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
 	if st.Flag(' ') {
-		io.WriteString(st, " ")
+		mustWriteString(st, " ")
 	}
-	io.WriteString(st, unit.String())
+	mustWriteString(st, unit.String())
 }
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
index 634edabfdc0..f052352fc40 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
@@ -2,7 +2,6 @@ package decor
 
 import (
 	"fmt"
-	"io"
 	"math"
 	"time"
 
@@ -24,7 +23,7 @@ type speedFormatter struct {
 
 func (self *speedFormatter) Format(st fmt.State, verb rune) {
 	self.Formatter.Format(st, verb)
-	io.WriteString(st, "/s")
+	mustWriteString(st, "/s")
 }
 
 // EwmaSpeed exponential-weighted-moving-average based speed decorator.
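With Statistics.Aborted now exposed, the new decor.OnAbort wrapper can swap a decorator for a message once a bar is aborted, mirroring decor.OnComplete. A hedged sketch combining both; doWork is a hypothetical unit of work:

    package main

    import (
    	"errors"

    	"github.com/vbauerster/mpb/v7"
    	"github.com/vbauerster/mpb/v7/decor"
    )

    func main() {
    	p := mpb.New()
    	bar := p.AddBar(100,
    		mpb.AppendDecorators(
    			// percentage while running, "done" on completion,
    			// "canceled" if the bar is aborted
    			decor.OnAbort(decor.OnComplete(decor.Percentage(), "done"), "canceled"),
    		),
    	)
    	for i := 0; i < 100; i++ {
    		if err := doWork(i); err != nil {
    			bar.Abort(false) // keep the bar on screen, flagged as aborted
    			break
    		}
    		bar.Increment()
    	}
    	p.Wait()
    }

    func doWork(i int) error {
    	if i == 42 {
    		return errors.New("boom")
    	}
    	return nil
    }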
diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go b/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go
deleted file mode 100644
index 1e4dd24d9da..00000000000
--- a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package internal
-
-// Predicate helper for internal use.
-func Predicate(pick bool) func() bool {
-	return func() bool { return pick }
-}
diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go
index c60c6569406..1d9a53e5c9d 100644
--- a/vendor/github.com/vbauerster/mpb/v7/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v7/progress.go
@@ -6,8 +6,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
-	"log"
 	"math"
 	"os"
 	"sync"
@@ -18,12 +16,10 @@ import (
 )
 
 const (
-	// default RefreshRate
-	prr = 150 * time.Millisecond
+	prr = 150 * time.Millisecond // default RefreshRate
 )
 
-// Progress represents a container that renders one or more progress
-// bars.
+// Progress represents a container that renders one or more progress bars.
 type Progress struct {
 	ctx          context.Context
 	uwg          *sync.WaitGroup
@@ -33,17 +29,14 @@ type Progress struct {
 	done         chan struct{}
 	refreshCh    chan time.Time
 	once         sync.Once
-	dlogger      *log.Logger
 }
 
-// pState holds bars in its priorityQueue. It gets passed to
-// *Progress.serve(...) monitor goroutine.
+// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine.
 type pState struct {
-	bHeap            priorityQueue
-	heapUpdated      bool
-	pMatrix          map[int][]chan int
-	aMatrix          map[int][]chan int
-	barShutdownQueue []*Bar
+	bHeap       priorityQueue
+	heapUpdated bool
+	pMatrix     map[int][]chan int
+	aMatrix     map[int][]chan int
 
 	// following are provided/overrided by user
 	idCount          int
@@ -55,27 +48,26 @@ type pState struct {
 	externalRefresh  <-chan interface{}
 	renderDelay      <-chan struct{}
 	shutdownNotifier chan struct{}
-	parkedBars       map[*Bar]*Bar
+	queueBars        map[*Bar]*Bar
 	output           io.Writer
 	debugOut         io.Writer
 }
 
 // New creates new Progress container instance. It's not possible to
-// reuse instance after *Progress.Wait() method has been called.
+// reuse instance after (*Progress).Wait method has been called.
 func New(options ...ContainerOption) *Progress {
 	return NewWithContext(context.Background(), options...)
 }
 
 // NewWithContext creates new Progress container instance with provided
-// context. It's not possible to reuse instance after *Progress.Wait()
+// context. It's not possible to reuse instance after (*Progress).Wait
 // method has been called.
 func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
 	s := &pState{
-		bHeap:      priorityQueue{},
-		rr:         prr,
-		parkedBars: make(map[*Bar]*Bar),
-		output:     os.Stdout,
-		debugOut:   ioutil.Discard,
+		bHeap:     priorityQueue{},
+		rr:        prr,
+		queueBars: make(map[*Bar]*Bar),
+		output:    os.Stdout,
 	}
 
 	for _, opt := range options {
@@ -91,7 +83,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
 		bwg:          new(sync.WaitGroup),
 		operateState: make(chan func(*pState)),
 		done:         make(chan struct{}),
-		dlogger:      log.New(s.debugOut, "[mpb] ", log.Lshortfile),
 	}
 
 	p.cwg.Add(1)
@@ -99,25 +90,27 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
 	return p
 }
 
-// AddBar creates a bar with default bar filler. Different filler can
-// be chosen and applied via `*Progress.Add(...) *Bar` method.
+// AddBar creates a bar with default bar filler.
 func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
-	return p.Add(total, NewBarFiller(BarStyle()), options...)
+	return p.New(total, BarStyle(), options...)
 }
 
-// AddSpinner creates a bar with default spinner filler. Different
-// filler can be chosen and applied via `*Progress.Add(...) *Bar`
-// method.
+// AddSpinner creates a bar with default spinner filler.
 func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar {
-	return p.Add(total, NewBarFiller(SpinnerStyle()), options...)
+	return p.New(total, SpinnerStyle(), options...)
+}
+
+// New creates a bar with provided BarFillerBuilder.
+func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar {
+	return p.Add(total, builder.Build(), options...)
 }
 
 // Add creates a bar which renders itself by provided filler.
-// If `total <= 0` trigger complete event is disabled until reset with *bar.SetTotal(int64, bool).
-// Panics if *Progress instance is done, i.e. called after *Progress.Wait().
+// If `total <= 0` triggering complete event by increment methods is disabled.
+// Panics if *Progress instance is done, i.e. called after (*Progress).Wait().
 func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar {
 	if filler == nil {
-		filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {})
+		filler = NopStyle().Build()
 	}
 	p.bwg.Add(1)
 	result := make(chan *Bar)
@@ -125,9 +118,8 @@
 	case p.operateState <- func(ps *pState) {
 		bs := ps.makeBarState(total, filler, options...)
 		bar := newBar(p, bs)
-		if bs.runningBar != nil {
-			bs.runningBar.noPop = true
-			ps.parkedBars[bs.runningBar] = bar
+		if bs.afterBar != nil {
+			ps.queueBars[bs.afterBar] = bar
 		} else {
 			heap.Push(&ps.bHeap, bar)
 			ps.heapUpdated = true
@@ -136,7 +128,6 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar
 		result <- bar
 	}:
 		bar := <-result
-		bar.subscribeDecorators()
 		return bar
 	case <-p.done:
 		p.bwg.Done()
@@ -144,21 +135,8 @@
 	}
 }
 
-func (p *Progress) dropBar(b *Bar) {
-	select {
-	case p.operateState <- func(s *pState) {
-		if b.index < 0 {
-			return
-		}
-		heap.Remove(&s.bHeap, b.index)
-		s.heapUpdated = true
-	}:
-	case <-p.done:
-	}
-}
-
 func (p *Progress) traverseBars(cb func(b *Bar) bool) {
-	done := make(chan struct{})
+	sync := make(chan struct{})
 	select {
 	case p.operateState <- func(s *pState) {
 		for i := 0; i < s.bHeap.Len(); i++ {
@@ -167,9 +145,9 @@
 				break
 			}
 		}
-		close(done)
+		close(sync)
 	}:
-		<-done
+		<-sync
 	case <-p.done:
 	}
 }
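The new (*Progress).New entry point takes a BarFillerBuilder directly, so callers no longer go through the deprecated NewBarFiller shim. A hedged sketch of the three construction styles:

    package main

    import "github.com/vbauerster/mpb/v7"

    func main() {
    	p := mpb.New()

    	b1 := p.AddBar(100)              // sugar for p.New(100, mpb.BarStyle())
    	b2 := p.AddSpinner(100)          // sugar for p.New(100, mpb.SpinnerStyle())
    	b3 := p.New(100, mpb.NopStyle()) // invisible filler, decorators still render

    	for i := 0; i < 100; i++ {
    		b1.Increment()
    		b2.Increment()
    		b3.Increment()
    	}
    	p.Wait()
    }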
@@ -203,8 +181,8 @@ func (p *Progress) BarCount() int {
 // After this method has been called, there is no way to reuse *Progress
 // instance.
 func (p *Progress) Wait() {
+	// wait for user wg, if any
 	if p.uwg != nil {
-		// wait for user wg
 		p.uwg.Wait()
 	}
 
@@ -232,12 +210,26 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
 			op(s)
 		case <-p.refreshCh:
 			if err := s.render(cw); err != nil {
-				p.dlogger.Println(err)
+				if s.debugOut != nil {
+					_, e := fmt.Fprintln(s.debugOut, err)
+					if e != nil {
+						panic(err)
+					}
+				} else {
+					panic(err)
+				}
 			}
 		case <-s.shutdownNotifier:
 			for s.heapUpdated {
 				if err := s.render(cw); err != nil {
-					p.dlogger.Println(err)
+					if s.debugOut != nil {
+						_, e := fmt.Fprintln(s.debugOut, err)
+						if e != nil {
+							panic(err)
+						}
+					} else {
+						panic(err)
+					}
 				}
 			}
 			return
@@ -245,6 +237,64 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
 	}
 }
 
+func (s *pState) render(cw *cwriter.Writer) error {
+	if s.heapUpdated {
+		s.updateSyncMatrix()
+		s.heapUpdated = false
+	}
+	syncWidth(s.pMatrix)
+	syncWidth(s.aMatrix)
+
+	tw, err := cw.GetWidth()
+	if err != nil {
+		tw = s.reqWidth
+	}
+	for i := 0; i < s.bHeap.Len(); i++ {
+		bar := s.bHeap[i]
+		go bar.render(tw)
+	}
+
+	return s.flush(cw)
+}
+
+func (s *pState) flush(cw *cwriter.Writer) error {
+	var lines int
+	pool := make([]*Bar, 0, s.bHeap.Len())
+	for s.bHeap.Len() > 0 {
+		b := heap.Pop(&s.bHeap).(*Bar)
+		frame := <-b.frameCh
+		lines += frame.lines
+		_, err := cw.ReadFrom(frame.reader)
+		if err != nil {
+			return err
+		}
+		if frame.shutdown {
+			b.Wait() // waiting for b.done, so it's safe to read b.bs
+			var toDrop bool
+			if qb, ok := s.queueBars[b]; ok {
+				delete(s.queueBars, b)
+				qb.priority = b.priority
+				pool = append(pool, qb)
+				toDrop = true
+			} else if s.popCompleted && !b.bs.noPop {
+				lines -= frame.lines
+				toDrop = true
+			}
+			if toDrop || b.bs.dropOnComplete {
+				s.heapUpdated = true
+				continue
+			}
+		}
+		pool = append(pool, b)
+	}
+
+	for _, b := range pool {
+		heap.Push(&s.bHeap, b)
+	}
+
+	return cw.Flush(lines)
+}
+
 func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
 	ch := make(chan time.Time)
 	if s.shutdownNotifier == nil {
@@ -283,75 +333,6 @@ func (s *pState) newTicker(done <-chan struct{}) chan time.Time {
 	return ch
 }
 
-func (s *pState) render(cw *cwriter.Writer) error {
-	if s.heapUpdated {
-		s.updateSyncMatrix()
-		s.heapUpdated = false
-	}
-	syncWidth(s.pMatrix)
-	syncWidth(s.aMatrix)
-
-	tw, err := cw.GetWidth()
-	if err != nil {
-		tw = s.reqWidth
-	}
-	for i := 0; i < s.bHeap.Len(); i++ {
-		bar := s.bHeap[i]
-		go bar.render(tw)
-	}
-
-	return s.flush(cw)
-}
-
-func (s *pState) flush(cw *cwriter.Writer) error {
-	var totalLines int
-	bm := make(map[*Bar]int, s.bHeap.Len())
-	for s.bHeap.Len() > 0 {
-		b := heap.Pop(&s.bHeap).(*Bar)
-		frame := <-b.frameCh
-		cw.ReadFrom(frame.reader)
-		if b.toShutdown {
-			if b.recoveredPanic != nil {
-				s.barShutdownQueue = append(s.barShutdownQueue, b)
-				b.toShutdown = false
-			} else {
-				// shutdown at next flush
-				// this ensures no bar ends up with less than 100% rendered
-				defer func() {
-					s.barShutdownQueue = append(s.barShutdownQueue, b)
-				}()
-			}
-		}
-		bm[b] = frame.lines
-		totalLines += frame.lines
-	}
-
-	for _, b := range s.barShutdownQueue {
-		if parkedBar := s.parkedBars[b]; parkedBar != nil {
-			parkedBar.priority = b.priority
-			heap.Push(&s.bHeap, parkedBar)
-			delete(s.parkedBars, b)
-			b.toDrop = true
-		}
-		if s.popCompleted && !b.noPop {
-			totalLines -= bm[b]
-			b.toDrop = true
-		}
-		if b.toDrop {
-			delete(bm, b)
-			s.heapUpdated = true
-		}
-		b.cancel()
-	}
-	s.barShutdownQueue = s.barShutdownQueue[0:0]
-
-	for b := range bm {
-		heap.Push(&s.bHeap, b)
-	}
-
-	return cw.Flush(totalLines)
-}
-
 func (s *pState) updateSyncMatrix() {
 	s.pMatrix = make(map[int][]chan int)
 	s.aMatrix = make(map[int][]chan int)
@@ -400,9 +381,11 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
 		bs.priority = -(math.MaxInt32 - s.idCount)
 	}
 
-	bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
-	bs.bufB = bytes.NewBuffer(make([]byte, 0, 256))
-	bs.bufA = bytes.NewBuffer(make([]byte, 0, 128))
+	for i := 0; i < len(bs.buffers); i++ {
+		bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512))
+	}
+
+	bs.subscribeDecorators()
 
 	return bs
 }
@@ -413,7 +396,7 @@ func syncWidth(matrix map[int][]chan int) {
 	}
 }
 
-var maxWidthDistributor = func(column []chan int) {
+func maxWidthDistributor(column []chan int) {
 	var maxWidth int
 	for _, ch := range column {
 		if w := <-ch; w > maxWidth {
diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
index 316f438d76b..b0dd89d458c 100644
--- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
+++ b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
@@ -11,73 +11,63 @@ type proxyReader struct {
 	bar *Bar
 }
 
-func (x *proxyReader) Read(p []byte) (int, error) {
+func (x proxyReader) Read(p []byte) (int, error) {
 	n, err := x.ReadCloser.Read(p)
 	x.bar.IncrBy(n)
-	if err == io.EOF {
-		go x.bar.SetTotal(0, true)
-	}
 	return n, err
 }
 
 type proxyWriterTo struct {
-	io.ReadCloser // *proxyReader
-	wt            io.WriterTo
-	bar           *Bar
+	proxyReader
+	wt io.WriterTo
 }
 
-func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
 	n, err := x.wt.WriteTo(w)
 	x.bar.IncrInt64(n)
-	if err == io.EOF {
-		go x.bar.SetTotal(0, true)
-	}
 	return n, err
 }
 
 type ewmaProxyReader struct {
-	io.ReadCloser // *proxyReader
-	bar           *Bar
-	iT            time.Time
+	proxyReader
 }
 
-func (x *ewmaProxyReader) Read(p []byte) (int, error) {
-	n, err := x.ReadCloser.Read(p)
+func (x ewmaProxyReader) Read(p []byte) (int, error) {
+	start := time.Now()
+	n, err := x.proxyReader.Read(p)
 	if n > 0 {
-		x.bar.DecoratorEwmaUpdate(time.Since(x.iT))
-		x.iT = time.Now()
+		x.bar.DecoratorEwmaUpdate(time.Since(start))
 	}
 	return n, err
 }
 
 type ewmaProxyWriterTo struct {
-	io.ReadCloser // *ewmaProxyReader
-	wt            io.WriterTo // *proxyWriterTo
-	bar           *Bar
-	iT            time.Time
+	ewmaProxyReader
+	wt proxyWriterTo
 }
 
-func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+	start := time.Now()
 	n, err := x.wt.WriteTo(w)
 	if n > 0 {
-		x.bar.DecoratorEwmaUpdate(time.Since(x.iT))
-		x.iT = time.Now()
+		x.bar.DecoratorEwmaUpdate(time.Since(start))
 	}
 	return n, err
 }
 
-func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser {
-	rc := toReadCloser(r)
-	rc = &proxyReader{rc, bar}
-
-	if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators {
-		now := time.Now()
-		rc = &ewmaProxyReader{rc, bar, now}
-		if isWriterTo {
-			rc = &ewmaProxyWriterTo{rc, wt, bar, now}
+func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) {
+	pr := proxyReader{toReadCloser(r), b}
+	if wt, ok := r.(io.WriterTo); ok {
+		pw := proxyWriterTo{pr, wt}
+		if b.hasEwma {
+			rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw}
+		} else {
+			rc = pw
 		}
-	} else if isWriterTo {
-		rc = &proxyWriterTo{rc, wt, bar}
+	} else if b.hasEwma {
+		rc = ewmaProxyReader{pr}
+	} else {
+		rc = pr
 	}
 	return rc
 }
diff --git a/vendor/github.com/vishvananda/netlink/.travis.yml b/vendor/github.com/vishvananda/netlink/.travis.yml
deleted file mode 100644
index 80219c69d6f..00000000000
--- a/vendor/github.com/vishvananda/netlink/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: go
-go:
-  - "1.12.x"
-  - "1.13.x"
-  - "1.14.x"
-before_script:
-  # make sure we keep path in tact when we sudo
-  - sudo sed -i -e 's/^Defaults\tsecure_path.*$//' /etc/sudoers
-  # modprobe ip_gre or else the first gre device can't be deleted
-  - sudo modprobe ip_gre
-  # modprobe nf_conntrack for the conntrack testing
-  - sudo modprobe nf_conntrack
-  - sudo modprobe nf_conntrack_netlink
-  - sudo modprobe nf_conntrack_ipv4
-  - sudo modprobe nf_conntrack_ipv6
-  - sudo modprobe sch_hfsc
-  - sudo modprobe sch_sfq
-install:
-  - go get -v -t ./...
-go_import_path: github.com/vishvananda/netlink
diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md
index 0e4ddb01649..0128bc67d55 100644
--- a/vendor/github.com/vishvananda/netlink/README.md
+++ b/vendor/github.com/vishvananda/netlink/README.md
@@ -1,6 +1,6 @@
 # netlink - netlink library for go #
 
-[![Build Status](https://app.travis-ci.com/vishvananda/netlink.svg?branch=master)](https://app.travis-ci.com/github/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink)
+![Build Status](https://github.com/vishvananda/netlink/actions/workflows/main.yml/badge.svg) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink)
 
 The netlink package provides a simple netlink library for go. Netlink
 is the interface a user-space program in linux uses to communicate with
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index e4a167ddaef..72862ce1f44 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -176,7 +176,7 @@ func AddrList(link Link, family int) ([]Addr, error) {
 // The list can be filtered by link and ip family.
 func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP)
-	msg := nl.NewIfInfomsg(family)
+	msg := nl.NewIfAddrmsg(family)
 	req.AddData(msg)
 
 	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR)
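The AddrList fix (sending an ifaddrmsg instead of an ifinfomsg with RTM_GETADDR) matters once kernels enforce strict header checking. A typical call site, as a hedged sketch; the eth0 interface is assumed to exist:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/vishvananda/netlink"
    )

    func main() {
    	link, err := netlink.LinkByName("eth0")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// with the ifaddrmsg fix this dump also works on strict-checking kernels
    	addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, a := range addrs {
    		fmt.Println(a.IPNet)
    	}
    }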
@@ -296,13 +296,13 @@ type AddrUpdate struct {
 // AddrSubscribe takes a chan down which notifications will be sent
 // when addresses change. Close the 'done' chan to stop subscription.
 func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
-	return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0)
+	return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil)
 }
 
 // AddrSubscribeAt works like AddrSubscribe plus it allows the caller
 // to choose the network namespace in which to subscribe (ns).
 func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
-	return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0)
+	return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil)
 }
 
 // AddrSubscribeOptions contains a set of options to use with
@@ -312,6 +312,7 @@ type AddrSubscribeOptions struct {
 	ErrorCallback     func(error)
 	ListExisting      bool
 	ReceiveBufferSize int
+	ReceiveTimeout    *unix.Timeval
 }
 
 // AddrSubscribeWithOptions work like AddrSubscribe but enable to
@@ -322,14 +323,20 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option
 		none := netns.None()
 		options.Namespace = &none
 	}
-	return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize)
+	return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, options.ReceiveBufferSize, options.ReceiveTimeout)
 }
 
-func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int) error {
+func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, rcvbuf int, rcvTimeout *unix.Timeval) error {
 	s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR)
 	if err != nil {
 		return err
 	}
+	if rcvTimeout != nil {
+		if err := s.SetReceiveTimeout(rcvTimeout); err != nil {
+			return err
+		}
+	}
+
 	if done != nil {
 		go func() {
 			<-done
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index 2dc34b99592..2d798b0fbbc 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -157,6 +157,39 @@ func NewConnmarkAction() *ConnmarkAction {
 	}
 }
 
+type CsumUpdateFlags uint32
+
+const (
+	TCA_CSUM_UPDATE_FLAG_IPV4HDR CsumUpdateFlags = 1
+	TCA_CSUM_UPDATE_FLAG_ICMP    CsumUpdateFlags = 2
+	TCA_CSUM_UPDATE_FLAG_IGMP    CsumUpdateFlags = 4
+	TCA_CSUM_UPDATE_FLAG_TCP     CsumUpdateFlags = 8
+	TCA_CSUM_UPDATE_FLAG_UDP     CsumUpdateFlags = 16
+	TCA_CSUM_UPDATE_FLAG_UDPLITE CsumUpdateFlags = 32
+	TCA_CSUM_UPDATE_FLAG_SCTP    CsumUpdateFlags = 64
+)
+
+type CsumAction struct {
+	ActionAttrs
+	UpdateFlags CsumUpdateFlags
+}
+
+func (action *CsumAction) Type() string {
+	return "csum"
+}
+
+func (action *CsumAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewCsumAction() *CsumAction {
+	return &CsumAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
+
 type MirredAct uint8
 
 func (a MirredAct) String() string {
@@ -260,6 +293,40 @@ func NewSkbEditAction() *SkbEditAction {
 	}
 }
 
+type PoliceAction struct {
+	ActionAttrs
+	Rate            uint32 // in byte per second
+	Burst           uint32 // in byte
+	RCellLog        int
+	Mtu             uint32
+	Mpu             uint16 // in byte
+	PeakRate        uint32 // in byte per second
+	PCellLog        int
+	AvRate          uint32 // in byte per second
+	Overhead        uint16
+	LinkLayer       int
+	ExceedAction    TcPolAct
+	NotExceedAction TcPolAct
+}
+
+func (action *PoliceAction) Type() string {
+	return "police"
+}
+
+func (action *PoliceAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewPoliceAction() *PoliceAction {
+	return &PoliceAction{
+		RCellLog:        -1,
+		PCellLog:        -1,
+		LinkLayer:       1, // ETHERNET
+		ExceedAction:    TC_POLICE_RECLASSIFY,
+		NotExceedAction: TC_POLICE_OK,
+	}
+}
+
 // MatchAll filters match all packets
 type MatchAll struct {
 	FilterAttrs
@@ -275,20 +342,20 @@ func (filter *MatchAll) Type() string {
 	return "matchall"
 }
 
-type FilterFwAttrs struct {
-	ClassId   uint32
-	InDev     string
-	Mask      uint32
-	Index     uint32
-	Buffer    uint32
-	Mtu       uint32
-	Mpu       uint16
-	Rate      uint32
-	AvRate    uint32
-	PeakRate  uint32
-	Action    TcPolAct
-	Overhead  uint16
-	LinkLayer int
+type FwFilter struct {
+	FilterAttrs
+	ClassId uint32
+	InDev   string
+	Mask    uint32
+	Police  *PoliceAction
+}
+
+func (filter *FwFilter) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+func (filter *FwFilter) Type() string {
+	return "fw"
 }
 
 type BpfFilter struct {
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index d5d45c8725a..4c6d1cf7d7f 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -51,76 +51,6 @@ func (filter *U32) Type() string {
 	return "u32"
 }
 
-// Fw filter filters on firewall marks
-// NOTE: this is in filter_linux because it refers to nl.TcPolice which
-// is defined in nl/tc_linux.go
-type Fw struct {
-	FilterAttrs
-	ClassId uint32
-	// TODO remove nl type from interface
-	Police nl.TcPolice
-	InDev  string
-	// TODO Action
-	Mask   uint32
-	AvRate uint32
-	Rtab   [256]uint32
-	Ptab   [256]uint32
-}
-
-func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) {
-	var rtab [256]uint32
-	var ptab [256]uint32
-	rcellLog := -1
-	pcellLog := -1
-	avrate := fattrs.AvRate / 8
-	police := nl.TcPolice{}
-	police.Rate.Rate = fattrs.Rate / 8
-	police.PeakRate.Rate = fattrs.PeakRate / 8
-	buffer := fattrs.Buffer
-	linklayer := nl.LINKLAYER_ETHERNET
-
-	if fattrs.LinkLayer != nl.LINKLAYER_UNSPEC {
-		linklayer = fattrs.LinkLayer
-	}
-
-	police.Action = int32(fattrs.Action)
-	if police.Rate.Rate != 0 {
-		police.Rate.Mpu = fattrs.Mpu
-		police.Rate.Overhead = fattrs.Overhead
-		if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 {
-			return nil, errors.New("TBF: failed to calculate rate table")
-		}
-		police.Burst = Xmittime(uint64(police.Rate.Rate), uint32(buffer))
-	}
-	police.Mtu = fattrs.Mtu
-	if police.PeakRate.Rate != 0 {
-		police.PeakRate.Mpu = fattrs.Mpu
-		police.PeakRate.Overhead = fattrs.Overhead
-		if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 {
-			return nil, errors.New("POLICE: failed to calculate peak rate table")
-		}
-	}
-
-	return &Fw{
-		FilterAttrs: attrs,
-		ClassId:     fattrs.ClassId,
-		InDev:       fattrs.InDev,
-		Mask:        fattrs.Mask,
-		Police:      police,
-		AvRate:      avrate,
-		Rtab:        rtab,
-		Ptab:        ptab,
-	}, nil
-}
-
-func (filter *Fw) Attrs() *FilterAttrs {
-	return &filter.FilterAttrs
-}
-
-func (filter *Fw) Type() string {
-	return "fw"
-}
-
 type Flower struct {
 	FilterAttrs
 	DestIP net.IP
@@ -362,7 +292,7 @@ func (h *Handle) filterModify(filter Filter, flags int) error {
 		if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
 			return err
 		}
-	case *Fw:
+	case *FwFilter:
 		if filter.Mask != 0 {
 			b := make([]byte, 4)
 			native.PutUint32(b, filter.Mask)
@@ -371,17 +301,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error {
 		if filter.InDev != "" {
 			options.AddRtAttr(nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev))
 		}
-		if (filter.Police != nl.TcPolice{}) {
-
+		if filter.Police != nil {
 			police := options.AddRtAttr(nl.TCA_FW_POLICE, nil)
-			police.AddRtAttr(nl.TCA_POLICE_TBF, filter.Police.Serialize())
-			if (filter.Police.Rate != nl.TcRateSpec{}) {
-				payload := SerializeRtab(filter.Rtab)
-				police.AddRtAttr(nl.TCA_POLICE_RATE, payload)
-			}
-			if (filter.Police.PeakRate != nl.TcRateSpec{}) {
-				payload := SerializeRtab(filter.Ptab)
-				police.AddRtAttr(nl.TCA_POLICE_PEAKRATE, payload)
+			if err := encodePolice(police, filter.Police); err != nil {
+				return err
 			}
 		}
 		if filter.ClassId != 0 {
@@ -479,7 +402,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
 			case "u32":
 				filter = &U32{}
 			case "fw":
-				filter = &Fw{}
+				filter = &FwFilter{}
 			case "bpf":
 				filter = &BpfFilter{}
 			case "matchall":
@@ -551,6 +474,53 @@ func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) {
 	attrs.Bindcnt = int(tcgen.Bindcnt)
 }
 
+func encodePolice(attr *nl.RtAttr, action *PoliceAction) error {
+	var rtab [256]uint32
+	var ptab [256]uint32
+	police := nl.TcPolice{}
+	police.Index = uint32(action.Attrs().Index)
+	police.Bindcnt = int32(action.Attrs().Bindcnt)
+	police.Capab = uint32(action.Attrs().Capab)
+	police.Refcnt = int32(action.Attrs().Refcnt)
+	police.Rate.Rate = action.Rate
+	police.PeakRate.Rate = action.PeakRate
+	police.Action = int32(action.ExceedAction)
+
+	if police.Rate.Rate != 0 {
+		police.Rate.Mpu = action.Mpu
+		police.Rate.Overhead = action.Overhead
+		if CalcRtable(&police.Rate, rtab[:], action.RCellLog, action.Mtu, action.LinkLayer) < 0 {
+			return errors.New("TBF: failed to calculate rate table")
+		}
+		police.Burst = Xmittime(uint64(police.Rate.Rate), action.Burst)
+	}
+
+	police.Mtu = action.Mtu
+	if police.PeakRate.Rate != 0 {
+		police.PeakRate.Mpu = action.Mpu
+		police.PeakRate.Overhead = action.Overhead
+		if CalcRtable(&police.PeakRate, ptab[:], action.PCellLog, action.Mtu, action.LinkLayer) < 0 {
+			return errors.New("POLICE: failed to calculate peak rate table")
+		}
+	}
+
+	attr.AddRtAttr(nl.TCA_POLICE_TBF, police.Serialize())
+	if police.Rate.Rate != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_RATE, SerializeRtab(rtab))
+	}
+	if police.PeakRate.Rate != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_PEAKRATE, SerializeRtab(ptab))
+	}
+	if action.AvRate != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_AVRATE, nl.Uint32Attr(action.AvRate))
+	}
+	if action.NotExceedAction != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_RESULT, nl.Uint32Attr(uint32(action.NotExceedAction)))
+	}
+
+	return nil
+}
+
 func EncodeActions(attr *nl.RtAttr, actions []Action) error {
 	tabIndex := int(nl.TCA_ACT_TAB)
 
@@ -558,6 +528,14 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
 		switch action := action.(type) {
 		default:
 			return fmt.Errorf("unknown action type %s", action.Type())
+		case *PoliceAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("police"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			if err := encodePolice(aopts, action); err != nil {
+				return err
+			}
 		case *MirredAction:
 			table := attr.AddRtAttr(tabIndex, nil)
 			tabIndex++
@@ -629,6 +607,16 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
 			}
 			toTcGen(action.Attrs(), &connmark.TcGen)
 			aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize())
+		case *CsumAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("csum"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			csum := nl.TcCsum{
+				UpdateFlags: uint32(action.UpdateFlags),
+			}
+			toTcGen(action.Attrs(), &csum.TcGen)
+			aopts.AddRtAttr(nl.TCA_CSUM_PARMS, csum.Serialize())
 		case *BpfAction:
 			table := attr.AddRtAttr(tabIndex, nil)
 			tabIndex++
@@ -652,6 +640,29 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error {
 	return nil
 }
 
+func parsePolice(data syscall.NetlinkRouteAttr, police *PoliceAction) {
+	switch data.Attr.Type {
+	case nl.TCA_POLICE_RESULT:
+		police.NotExceedAction = TcPolAct(native.Uint32(data.Value[0:4]))
+	case nl.TCA_POLICE_AVRATE:
+		police.AvRate = native.Uint32(data.Value[0:4])
+	case nl.TCA_POLICE_TBF:
+		p := *nl.DeserializeTcPolice(data.Value)
+		police.ActionAttrs = ActionAttrs{}
+		police.Attrs().Index = int(p.Index)
+		police.Attrs().Bindcnt = int(p.Bindcnt)
+		police.Attrs().Capab = int(p.Capab)
+		police.Attrs().Refcnt = int(p.Refcnt)
+		police.ExceedAction = TcPolAct(p.Action)
+		police.Rate = p.Rate.Rate
+		police.PeakRate = p.PeakRate.Rate
+		police.Burst = Xmitsize(uint64(p.Rate.Rate), p.Burst)
+		police.Mtu = p.Mtu
+		police.LinkLayer = int(p.Rate.Linklayer) & nl.TC_LINKLAYER_MASK
+		police.Overhead = p.Rate.Overhead
+	}
+}
+
 func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 	var actions []Action
 	for _, table := range tables {
@@ -674,12 +685,16 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 				action = &BpfAction{}
 			case "connmark":
 				action = &ConnmarkAction{}
+			case "csum":
+				action = &CsumAction{}
 			case "gact":
 				action = &GenericAction{}
 			case "tunnel_key":
 				action = &TunnelKeyAction{}
 			case "skbedit":
 				action = &SkbEditAction{}
+			case "police":
+				action = &PoliceAction{}
 			default:
 				break nextattr
 			}
@@ -752,12 +767,22 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
 						toAttrs(&connmark.TcGen, action.Attrs())
 						action.(*ConnmarkAction).Zone = connmark.Zone
 					}
+				case "csum":
+					switch adatum.Attr.Type {
+					case nl.TCA_CSUM_PARMS:
+						csum := *nl.DeserializeTcCsum(adatum.Value)
+						action.(*CsumAction).ActionAttrs = ActionAttrs{}
+						toAttrs(&csum.TcGen, action.Attrs())
+						action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags)
+					}
 				case "gact":
 					switch adatum.Attr.Type {
 					case nl.TCA_GACT_PARMS:
 						gen := *nl.DeserializeTcGen(adatum.Value)
 						toAttrs(&gen, action.Attrs())
 					}
+				case "police":
+					parsePolice(adatum, action.(*PoliceAction))
 				}
 			}
 		}
@@ -813,7 +838,7 @@ func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
 }
 
 func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
-	fw := filter.(*Fw)
+	fw := filter.(*FwFilter)
 	detailed := true
 	for _, datum := range data {
 		switch datum.Attr.Type {
@@ -824,17 +849,12 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
 		case nl.TCA_FW_INDEV:
 			fw.InDev = string(datum.Value[:len(datum.Value)-1])
 		case nl.TCA_FW_POLICE:
+			var police PoliceAction
 			adata, _ := nl.ParseRouteAttr(datum.Value)
 			for _, aattr := range adata {
-				switch aattr.Attr.Type {
-				case nl.TCA_POLICE_TBF:
-					fw.Police = *nl.DeserializeTcPolice(aattr.Value)
-				case nl.TCA_POLICE_RATE:
-					fw.Rtab = DeserializeRtab(aattr.Value)
-				case nl.TCA_POLICE_PEAKRATE:
-					fw.Ptab = DeserializeRtab(aattr.Value)
-				}
+				parsePolice(aattr, &police)
 			}
+			fw.Police = &police
 		}
 	}
 	return detailed, nil
@@ -859,7 +879,7 @@ func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error)
 		case nl.TCA_BPF_ID:
 			bpf.Id = int(native.Uint32(datum.Value[0:4]))
 		case nl.TCA_BPF_TAG:
-			bpf.Tag = hex.EncodeToString(datum.Value[:len(datum.Value)-1])
+			bpf.Tag = hex.EncodeToString(datum.Value)
 		}
 	}
 	return detailed, nil
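The fw filter now carries a first-class *PoliceAction instead of raw nl.TcPolice rate tables, and the same PoliceAction also encodes as a standalone TC action. A hedged sketch of attaching a rate-limiting fw filter; the interface name, rates, and handle values are invented, and an ingress qdisc is assumed to be present:

    package main

    import (
    	"log"

    	"github.com/vishvananda/netlink"
    	"golang.org/x/sys/unix"
    )

    func main() {
    	link, err := netlink.LinkByName("eth0") // hypothetical interface
    	if err != nil {
    		log.Fatal(err)
    	}
    	police := netlink.NewPoliceAction()
    	police.Rate = 1000 // bytes per second
    	police.Burst = 8000
    	police.PeakRate = 2000
    	police.Mtu = 40000

    	filter := &netlink.FwFilter{
    		FilterAttrs: netlink.FilterAttrs{
    			LinkIndex: link.Attrs().Index,
    			Parent:    netlink.HANDLE_INGRESS,
    			Priority:  1,
    			Protocol:  unix.ETH_P_ALL,
    		},
    		ClassId: netlink.MakeHandle(1, 1),
    		Police:  police,
    	}
    	if err := netlink.FilterAdd(filter); err != nil {
    		log.Fatal(err)
    	}
    }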
} +// SetStrictCheck sets the strict check socket option for each socket in the netlink handle. Returns early if any set operation fails +func (h *Handle) SetStrictCheck(state bool) error { + for _, sh := range h.sockets { + var stateInt int = 0 + if state { + stateInt = 1 + } + err := unix.SetsockoptInt(sh.Socket.GetFd(), unix.SOL_NETLINK, unix.NETLINK_GET_STRICT_CHK, stateInt) + if err != nil { + return err + } + } + return nil +} + // NewHandleAt returns a netlink handle on the network namespace // specified by ns. If ns=netns.None(), current network namespace // will be assumed diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index bdcb994ca04..33c872336d6 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -1038,6 +1038,7 @@ type Iptun struct { EncapType uint16 EncapFlags uint16 FlowBased bool + Proto uint8 } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -1073,6 +1074,37 @@ func (ip6tnl *Ip6tnl) Type() string { return "ip6tnl" } +// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L84 +type TunnelEncapType uint16 + +const ( + None TunnelEncapType = iota + FOU + GUE +) + +// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L91 +type TunnelEncapFlag uint16 + +const ( + CSum TunnelEncapFlag = 1 << 0 + CSum6 = 1 << 1 + RemCSum = 1 << 2 +) + +// from https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ip6_tunnel.h#L12 +type IP6TunnelFlag uint16 + +const ( + IP6_TNL_F_IGN_ENCAP_LIMIT IP6TunnelFlag = 1 // don't add encapsulation limit if one isn't present in inner packet + IP6_TNL_F_USE_ORIG_TCLASS = 2 // copy the traffic class field from the inner packet + IP6_TNL_F_USE_ORIG_FLOWLABEL = 4 // copy the flowlabel from the inner packet + IP6_TNL_F_MIP6_DEV = 8 // being used for Mobile IPv6 + IP6_TNL_F_RCV_DSCP_COPY = 10 // copy DSCP from the outer packet + IP6_TNL_F_USE_ORIG_FWMARK = 20 // copy fwmark from inner packet + IP6_TNL_F_ALLOW_LOCAL_REMOTE = 40 // allow remote endpoint on the local node +) + type Sittun struct { LinkAttrs Link uint32 diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index aa998e31131..276947a0069 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -2874,6 +2874,7 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(iptun.Proto)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2904,6 +2905,8 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_IPTUN_COLLECT_METADATA: iptun.FlowBased = true + case nl.IFLA_IPTUN_PROTO: + iptun.Proto = datum.Value[0] } } } @@ -3226,8 +3229,9 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo { func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex))) - data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) - + if xfrmi.Ifid != 0 { + data.AddRtAttr(nl.IFLA_XFRM_IF_ID, 
@@ -3226,8 +3229,9 @@ func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) VfInfo {
 func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) {
 	data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
 	data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex)))
-	data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid))
-
+	if xfrmi.Ifid != 0 {
+		data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid))
+	}
 }
 
 func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) {
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index dcd4b94695b..600b942b178 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -27,7 +27,8 @@ const (
 	// tc rules or filters, or other more memory requiring data.
 	RECEIVE_BUFFER_SIZE = 65536
 	// Kernel netlink pid
-	PidKernel uint32 = 0
+	PidKernel     uint32 = 0
+	SizeofCnMsgOp        = 0x18
 )
 
 // SupportedNlFamilies contains the list of netlink families this netlink package supports
@@ -38,6 +39,9 @@ var nextSeqNr uint32
 // Default netlink socket timeout, 60s
 var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0}
 
+// EnableErrorMessageReporting is the default error message reporting configuration for new netlink sockets
+var EnableErrorMessageReporting bool = false
+
 // GetIPFamily returns the family type of a net.IP.
 func GetIPFamily(ip net.IP) int {
 	if len(ip) <= net.IPv4len {
@@ -80,11 +84,69 @@ func Swap32(i uint32) uint32 {
 	return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24
 }
 
+const (
+	NLMSGERR_ATTR_UNUSED = 0
+	NLMSGERR_ATTR_MSG    = 1
+	NLMSGERR_ATTR_OFFS   = 2
+	NLMSGERR_ATTR_COOKIE = 3
+	NLMSGERR_ATTR_POLICY = 4
+)
+
 type NetlinkRequestData interface {
 	Len() int
 	Serialize() []byte
 }
 
+const (
+	PROC_CN_MCAST_LISTEN = iota + 1
+	PROC_CN_MCAST_IGNORE
+)
+
+type CbID struct {
+	Idx uint32
+	Val uint32
+}
+
+type CnMsg struct {
+	ID     CbID
+	Seq    uint32
+	Ack    uint32
+	Length uint16
+	Flags  uint16
+}
+
+type CnMsgOp struct {
+	CnMsg
+	// here we differ from the C header
+	Op uint32
+}
+
+func NewCnMsg(idx, val, op uint32) *CnMsgOp {
+	var cm CnMsgOp
+
+	cm.ID.Idx = idx
+	cm.ID.Val = val
+
+	cm.Ack = 0
+	cm.Seq = 1
+	cm.Length = uint16(binary.Size(op))
+	cm.Op = op
+
+	return &cm
+}
+
+func (msg *CnMsgOp) Serialize() []byte {
+	return (*(*[SizeofCnMsgOp]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func DeserializeCnMsgOp(b []byte) *CnMsgOp {
+	return (*CnMsgOp)(unsafe.Pointer(&b[0:SizeofCnMsgOp][0]))
+}
+
+func (msg *CnMsgOp) Len() int {
+	return SizeofCnMsgOp
+}
+
 // IfInfomsg is related to links, but it is used for list requests as well
 type IfInfomsg struct {
 	unix.IfInfomsg
@@ -252,6 +314,12 @@ func (msg *IfInfomsg) EncapType() string {
 	return fmt.Sprintf("unknown%d", msg.Type)
 }
 
+// Round the length of a netlink message up to align it properly.
+// Taken from syscall/netlink_linux.go by The Go Authors under BSD-style license.
+func nlmAlignOf(msglen int) int {
+	return (msglen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1)
+}
+
 func rtaAlignOf(attrlen int) int {
 	return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1)
 }
@@ -436,6 +504,11 @@ func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, erro
 		if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil {
 			return nil, err
 		}
+		if EnableErrorMessageReporting {
+			if err := s.SetExtAck(true); err != nil {
+				return nil, err
+			}
+		}
 
 		defer s.Close()
 	} else {
@@ -475,11 +548,37 @@ done:
 		}
 		if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR {
 			native := NativeEndian()
-			error := int32(native.Uint32(m.Data[0:4]))
-			if error == 0 {
+			errno := int32(native.Uint32(m.Data[0:4]))
+			if errno == 0 {
 				break done
 			}
-			return nil, syscall.Errno(-error)
+			var err error
+			err = syscall.Errno(-errno)
+
+			unreadData := m.Data[4:]
+			if m.Header.Flags&unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr {
+				// Skip the echoed request message.
+				echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0]))
+				unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):]
+
+				// Annotate `err` using nlmsgerr attributes.
+				for len(unreadData) >= syscall.SizeofRtAttr {
+					attr := (*syscall.RtAttr)(unsafe.Pointer(&unreadData[0]))
+					attrData := unreadData[syscall.SizeofRtAttr:attr.Len]
+
+					switch attr.Type {
+					case NLMSGERR_ATTR_MSG:
+						err = fmt.Errorf("%w: %s", err, string(attrData))
+
+					default:
+						// TODO: handle other NLMSGERR_ATTR types
+					}
+
+					unreadData = unreadData[rtaAlignOf(int(attr.Len)):]
+				}
+			}
+
+			return nil, err
 		}
 		if resType != 0 && m.Header.Type != resType {
 			continue
@@ -694,6 +793,16 @@ func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error {
 	return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout)
 }
 
+// SetExtAck requests error messages to be reported on the socket
+func (s *NetlinkSocket) SetExtAck(enable bool) error {
+	var enableN int
+	if enable {
+		enableN = 1
+	}
+
+	return unix.SetsockoptInt(int(s.fd), unix.SOL_NETLINK, unix.NETLINK_EXT_ACK, enableN)
+}
+
 func (s *NetlinkSocket) GetPid() (uint32, error) {
 	fd := int(atomic.LoadInt32(&s.fd))
 	lsa, err := unix.Getsockname(fd)
diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
index 002beda9274..eb05ff1cd1c 100644
--- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go
@@ -90,6 +90,7 @@ const (
 	SizeofTcU32Sel    = 0x10 // without keys
 	SizeofTcGen       = 0x14
 	SizeofTcConnmark  = SizeofTcGen + 0x04
+	SizeofTcCsum      = SizeofTcGen + 0x04
 	SizeofTcMirred    = SizeofTcGen + 0x08
 	SizeofTcTunnelKey = SizeofTcGen + 0x04
 	SizeofTcSkbEdit   = SizeofTcGen
@@ -694,6 +695,36 @@ func (x *TcConnmark) Serialize() []byte {
 	return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:]
 }
 
+const (
+	TCA_CSUM_UNSPEC = iota
+	TCA_CSUM_PARMS
+	TCA_CSUM_TM
+	TCA_CSUM_PAD
+	TCA_CSUM_MAX = TCA_CSUM_PAD
+)
+
+// struct tc_csum {
+//	tc_gen;
+//	__u32 update_flags;
+// }
+
+type TcCsum struct {
+	TcGen
+	UpdateFlags uint32
+}
+
+func (msg *TcCsum) Len() int {
+	return SizeofTcCsum
+}
+
+func DeserializeTcCsum(b []byte) *TcCsum {
+	return (*TcCsum)(unsafe.Pointer(&b[0:SizeofTcCsum][0]))
+}
+
+func (x *TcCsum) Serialize() []byte {
+	return (*(*[SizeofTcCsum]byte)(unsafe.Pointer(x)))[:]
+}
+
 const (
 	TCA_ACT_MIRRED = 8
 )
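`SetExtAck` plus the `EnableErrorMessageReporting` knob wire `NETLINK_EXT_ACK` into `Execute`, so a failed request comes back as the errno wrapped with the kernel's extended-ack string (the `NLMSGERR_ATTR_MSG` branch above). A minimal sketch; the route is deliberately bogus, and the exact message is kernel-dependent:

```go
package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	// Opt in before issuing requests; Execute enables NETLINK_EXT_ACK on
	// the sockets it creates while this is set.
	nl.EnableErrorMessageReporting = true

	// A route via a nonexistent interface index should fail; with extack
	// enabled the errno is annotated with the kernel's explanation.
	_, dst, _ := net.ParseCIDR("192.0.2.0/24") // TEST-NET-1, illustrative prefix
	err := netlink.RouteAdd(&netlink.Route{Dst: dst, LinkIndex: 424242})
	fmt.Println(err) // errno plus the kernel's extack text, where available
}
```

diff --git a/vendor/github.com/vishvananda/netlink/proc_event_linux.go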
b/vendor/github.com/vishvananda/netlink/proc_event_linux.go new file mode 100644 index 00000000000..53bc59a6ecf --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/proc_event_linux.go @@ -0,0 +1,217 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const CN_IDX_PROC = 0x1 + +const ( + PROC_EVENT_NONE = 0x00000000 + PROC_EVENT_FORK = 0x00000001 + PROC_EVENT_EXEC = 0x00000002 + PROC_EVENT_UID = 0x00000004 + PROC_EVENT_GID = 0x00000040 + PROC_EVENT_SID = 0x00000080 + PROC_EVENT_PTRACE = 0x00000100 + PROC_EVENT_COMM = 0x00000200 + PROC_EVENT_COREDUMP = 0x40000000 + PROC_EVENT_EXIT = 0x80000000 +) + +const ( + CN_VAL_PROC = 0x1 + PROC_CN_MCAST_LISTEN = 0x1 +) + +type ProcEventMsg interface { + Pid() uint32 + Tgid() uint32 +} + +type ProcEventHeader struct { + What uint32 + CPU uint32 + Timestamp uint64 +} + +type ProcEvent struct { + ProcEventHeader + Msg ProcEventMsg +} + +func (pe *ProcEvent) setHeader(h ProcEventHeader) { + pe.What = h.What + pe.CPU = h.CPU + pe.Timestamp = h.Timestamp +} + +type ExitProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 + ParentPid uint32 + ParentTgid uint32 +} + +type ExitProcEvent2 struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 + ParentPid uint32 + ParentTgid uint32 +} + +func (e *ExitProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExitProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ExecProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 +} + +func (e *ExecProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExecProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ForkProcEvent struct { + ParentPid uint32 + ParentTgid uint32 + ChildPid uint32 + ChildTgid uint32 +} + +func (e *ForkProcEvent) Pid() uint32 { + return e.ParentPid +} + +func (e *ForkProcEvent) Tgid() uint32 { + return e.ParentTgid +} + +type CommProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + Comm [16]byte +} + +func (e *CommProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *CommProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +func ProcEventMonitor(ch chan<- ProcEvent, done <-chan struct{}, errorChan chan<- error) error { + h, err := NewHandle() + if err != nil { + return err + } + defer h.Delete() + + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_CONNECTOR, CN_IDX_PROC) + if err != nil { + return err + } + + var nlmsg nl.NetlinkRequest + + nlmsg.Pid = uint32(os.Getpid()) + nlmsg.Type = unix.NLMSG_DONE + nlmsg.Len = uint32(unix.SizeofNlMsghdr) + + cm := nl.NewCnMsg(CN_IDX_PROC, CN_VAL_PROC, PROC_CN_MCAST_LISTEN) + nlmsg.AddData(cm) + + s.Send(&nlmsg) + + if done != nil { + go func() { + <-done + s.Close() + }() + } + + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + errorChan <- err + return + } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } + + for _, m := range msgs { + e := parseNetlinkMessage(m) + if e != nil { + ch <- *e + } + } + + } + }() + + return nil +} + +func parseNetlinkMessage(m syscall.NetlinkMessage) *ProcEvent { + if m.Header.Type == unix.NLMSG_DONE { + buf := bytes.NewBuffer(m.Data) + msg := &nl.CnMsg{} + hdr := &ProcEventHeader{} + binary.Read(buf, nl.NativeEndian(), msg) + binary.Read(buf, nl.NativeEndian(), 
hdr)
+
+		pe := &ProcEvent{}
+		pe.setHeader(*hdr)
+		switch hdr.What {
+		case PROC_EVENT_EXIT:
+			event := &ExitProcEvent{}
+			binary.Read(buf, nl.NativeEndian(), event)
+			pe.Msg = event
+			return pe
+		case PROC_EVENT_FORK:
+			event := &ForkProcEvent{}
+			binary.Read(buf, nl.NativeEndian(), event)
+			pe.Msg = event
+			return pe
+		case PROC_EVENT_EXEC:
+			event := &ExecProcEvent{}
+			binary.Read(buf, nl.NativeEndian(), event)
+			pe.Msg = event
+			return pe
+		case PROC_EVENT_COMM:
+			event := &CommProcEvent{}
+			binary.Read(buf, nl.NativeEndian(), event)
+			pe.Msg = event
+			return pe
+		}
+		return nil
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
index 9fc4b3d241e..e182e1cfe67 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -706,3 +706,9 @@ func Xmittime(rate uint64, size uint32) uint32 {
 	// https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/tc/tc_core.c#n62
 	return time2Tick(uint32(TIME_UNITS_PER_SEC * (float64(size) / float64(rate))))
 }
+
+// Xmitsize returns the size in bytes that can be transmitted at the given
+// rate during the given number of ticks; the inverse of Xmittime.
+func Xmitsize(rate uint64, ticks uint32) uint32 {
+	return uint32((float64(rate) * float64(tick2Time(ticks))) / TIME_UNITS_PER_SEC)
+}
diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go
index ffad5d3981d..79cc218ec81 100644
--- a/vendor/github.com/vishvananda/netlink/route.go
+++ b/vendor/github.com/vishvananda/netlink/route.go
@@ -11,6 +11,24 @@ type Scope uint8
 
 type NextHopFlag int
 
+const (
+	RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota)
+	RT_FILTER_SCOPE
+	RT_FILTER_TYPE
+	RT_FILTER_TOS
+	RT_FILTER_IIF
+	RT_FILTER_OIF
+	RT_FILTER_DST
+	RT_FILTER_SRC
+	RT_FILTER_GW
+	RT_FILTER_TABLE
+	RT_FILTER_HOPLIMIT
+	RT_FILTER_PRIORITY
+	RT_FILTER_MARK
+	RT_FILTER_MASK
+	RT_FILTER_REALM
+)
+
 type Destination interface {
 	Family() int
 	Decode([]byte) error
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
index b059d4a9eaa..8da8866573c 100644
--- a/vendor/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -41,23 +41,6 @@ func (s Scope) String() string {
 	}
 }
 
-const (
-	RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota)
-	RT_FILTER_SCOPE
-	RT_FILTER_TYPE
-	RT_FILTER_TOS
-	RT_FILTER_IIF
-	RT_FILTER_OIF
-	RT_FILTER_DST
-	RT_FILTER_SRC
-	RT_FILTER_GW
-	RT_FILTER_TABLE
-	RT_FILTER_HOPLIMIT
-	RT_FILTER_PRIORITY
-	RT_FILTER_MARK
-	RT_FILTER_MASK
-	RT_FILTER_REALM
-)
 
 const (
 	FLAG_ONLINK    NextHopFlag = unix.RTNH_F_ONLINK
@@ -1030,8 +1013,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e
 // All rules must be defined in RouteFilter struct
 func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) {
 	req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP)
-	infmsg := nl.NewIfInfomsg(family)
-	req.AddData(infmsg)
+	rtmsg := nl.NewRtMsg()
+	rtmsg.Family = uint8(family)
+	req.AddData(rtmsg)
 
 	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE)
 	if err != nil {
diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go
index 9b7b0af49e4..53cd3d4f6ac 100644
--- a/vendor/github.com/vishvananda/netlink/rule.go
+++ b/vendor/github.com/vishvananda/netlink/rule.go
@@ -25,6 +25,7 @@ type Rule struct {
 	Invert   bool
 	Dport    *RulePortRange
 	Sport    *RulePortRange
+	IPProto  int
 }
 
 func (r Rule) String() string {
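`Rule` gains an `IPProto` selector, serialized as `FRA_IP_PROTO` in the `rule_linux.go` hunk that follows. A minimal sketch of roughly `ip rule add ipproto udp table 100` (the table number is illustrative; requires CAP_NET_ADMIN):

```go
package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	rule := netlink.NewRule() // fills in the usual "-1 means unset" defaults
	rule.Table = 100          // illustrative table number
	rule.IPProto = unix.IPPROTO_UDP
	if err := netlink.RuleAdd(rule); err != nil {
		log.Fatal(err)
	}
}
```

diff --git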
a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index 9c426cbd31d..3ae2138808e 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -152,6 +152,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } + if rule.IPProto > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.IPProto)) + req.AddData(nl.NewRtAttr(nl.FRA_IP_PROTO, b)) + } + if rule.Dport != nil { b := rule.Dport.toRtAttrData() req.AddData(nl.NewRtAttr(nl.FRA_DPORT_RANGE, b)) @@ -250,6 +256,8 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_PRIORITY: rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_IP_PROTO: + rule.IPProto = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_DPORT_RANGE: rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) case nl.FRA_SPORT_RANGE: diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index 694bd74e6d0..35849680413 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -93,8 +93,10 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) - req.AddData(ifId) + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -189,8 +191,10 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) - req.AddData(ifId) + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } resType := nl.XFRM_MSG_NEWPOLICY if nlProto == nl.XFRM_MSG_DELPOLICY { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 3b37b87d38d..61a2d2dea28 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -167,8 +167,10 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { } } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) - req.AddData(ifId) + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } _, err := req.Execute(unix.NETLINK_XFRM, 0) return err @@ -281,8 +283,10 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(out) } - ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) - req.AddData(ifId) + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go deleted file mode 100644 index a4d1919a9e7..00000000000 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 
The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Deprecated: this package moved to golang.org/x/term. -package terminal - -import ( - "io" - - "golang.org/x/term" -) - -// EscapeCodes contains escape sequences that can be written to the terminal in -// order to achieve different styles of text. -type EscapeCodes = term.EscapeCodes - -// Terminal contains the state for running a VT100 terminal that is capable of -// reading lines of input. -type Terminal = term.Terminal - -// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is -// a local terminal, that terminal must first have been put into raw mode. -// prompt is a string that is written at the start of each input line (i.e. -// "> "). -func NewTerminal(c io.ReadWriter, prompt string) *Terminal { - return term.NewTerminal(c, prompt) -} - -// ErrPasteIndicator may be returned from ReadLine as the error, in addition -// to valid line data. It indicates that bracketed paste mode is enabled and -// that the returned line consists only of pasted data. Programs may wish to -// interpret pasted data more literally than typed data. -var ErrPasteIndicator = term.ErrPasteIndicator - -// State contains the state of a terminal. -type State = term.State - -// IsTerminal returns whether the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - return term.IsTerminal(fd) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - return term.ReadPassword(fd) -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - return term.MakeRaw(fd) -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func Restore(fd int, oldState *State) error { - return term.Restore(fd, oldState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - return term.GetState(fd) -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - return term.GetSize(fd) -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 2d859af8ff8..47524a61a5d 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -315,6 +315,20 @@ type ServeConnOpts struct { // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler http.Handler + + // UpgradeRequest is an initial request received on a connection + // undergoing an h2c upgrade. The request body must have been + // completely read from the connection before calling ServeConn, + // and the 101 Switching Protocols response written. + UpgradeRequest *http.Request + + // Settings is the decoded contents of the HTTP2-Settings header + // in an h2c upgrade request. 
+ Settings []byte + + // SawClientPreface is set if the HTTP/2 connection preface + // has already been read from the connection. + SawClientPreface bool } func (o *ServeConnOpts) context() context.Context { @@ -383,6 +397,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, + sawClientPreface: opts.SawClientPreface, } s.state.registerConn(sc) @@ -465,9 +480,27 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } } + if opts.Settings != nil { + fr := &SettingsFrame{ + FrameHeader: FrameHeader{valid: true}, + p: opts.Settings, + } + if err := fr.ForeachSetting(sc.processSetting); err != nil { + sc.rejectConn(ErrCodeProtocol, "invalid settings") + return + } + opts.Settings = nil + } + if hook := testHookGetServerConn; hook != nil { hook(sc) } + + if opts.UpgradeRequest != nil { + sc.upgradeRequest(opts.UpgradeRequest) + opts.UpgradeRequest = nil + } + sc.serve() } @@ -512,6 +545,7 @@ type serverConn struct { // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool + sawClientPreface bool // preface has already been read, used in h2c upgrade sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? @@ -974,6 +1008,9 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface") // returns errPrefaceTimeout on timeout, or an error if the greeting // is invalid. func (sc *serverConn) readPreface() error { + if sc.sawClientPreface { + return nil + } errc := make(chan error, 1) go func() { // Read the client preface @@ -1915,6 +1952,26 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { return nil } +func (sc *serverConn) upgradeRequest(req *http.Request) { + sc.serveG.check() + id := uint32(1) + sc.maxClientStreamID = id + st := sc.newStream(id, 0, stateHalfClosedRemote) + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + rw := sc.newResponseWriter(st, req) + + // Disable any read deadline set by the net/http package + // prior to the upgrade. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) +} + func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() @@ -2145,6 +2202,11 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r } req = req.WithContext(st.ctx) + rw := sc.newResponseWriter(st, req) + return rw, req, nil +} + +func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter { rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields @@ -2153,10 +2215,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil + return &responseWriter{rws: rws} } // Run on its own goroutine. 
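The new `ServeConnOpts` fields exist so the `golang.org/x/net/http2/h2c` wrapper can hand an already-upgraded connection back to the server: it consumes the `Upgrade: h2c` request and `HTTP2-Settings` header itself, then passes them in as `UpgradeRequest` and `Settings` (or sets `SawClientPreface` for prior-knowledge clients). Rough usage of that wrapper:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "proto: %s\n", r.Proto) // "HTTP/2.0" for h2c clients
	})
	// h2c.NewHandler serves cleartext HTTP/2 (upgrade or prior knowledge)
	// and falls back to plain HTTP/1.1 for everything else.
	log.Fatal(http.ListenAndServe(":8080", h2c.NewHandler(handler, &http2.Server{})))
}
```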
@@ -2371,7 +2430,6 @@ type responseWriterState struct {
 	// immutable within a request:
 	stream *stream
 	req    *http.Request
-	body   *requestBody // to close at end of request, if DATA frames didn't
 	conn   *serverConn
 
 	// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
index 2618b2c11d2..0a242c669e2 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -383,16 +383,15 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
 
 func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
 	var n *priorityNode
-	if id := wr.StreamID(); id == 0 {
+	if wr.isControl() {
 		n = &ws.root
 	} else {
+		id := wr.StreamID()
 		n = ws.nodes[id]
 		if n == nil {
 			// id is an idle or closed stream. wr should not be a HEADERS or
-			// DATA frame. However, wr can be a RST_STREAM. In this case, we
-			// push wr onto the root, rather than creating a new priorityNode,
-			// since RST_STREAM is tiny and the stream's priority is unknown
-			// anyway. See issue #17919.
+			// DATA frame. In either case, we push wr onto the root, rather
+			// than creating a new priorityNode.
 			if wr.DataSize() > 0 {
 				panic("add DATA on non-open stream")
 			}
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 1eab2fdf933..4c0850a45aa 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -17,7 +17,8 @@ type token struct{}
 // A Group is a collection of goroutines working on subtasks that are part of
 // the same overall task.
 //
-// A zero Group is valid and does not cancel on error.
+// A zero Group is valid, has no limit on the number of active goroutines,
+// and does not cancel on error.
type Group struct { cancel func() diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 5c2003cec65..932996c75b6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -618,6 +618,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index d12f4fbfea5..fdf53f8daf3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -66,6 +66,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -202,6 +203,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -339,6 +341,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -1044,6 +1047,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/gopkg.in/fsnotify.v1/.editorconfig b/vendor/gopkg.in/fsnotify.v1/.editorconfig deleted file mode 100644 index ba49e3c2349..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/.editorconfig +++ /dev/null @@ -1,5 +0,0 @@ -root = true - -[*] -indent_style = tab -indent_size = 4 diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/gopkg.in/fsnotify.v1/.gitignore deleted file mode 100644 index 4cd0cbaf432..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/vendor/gopkg.in/fsnotify.v1/.travis.yml b/vendor/gopkg.in/fsnotify.v1/.travis.yml deleted file mode 100644 index 981d1bb8132..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -sudo: false -language: go - -go: - - 1.8.x - - 1.9.x - - tip - -matrix: - allow_failures: - - go: tip - fast_finish: true - 
-before_script: - - go get -u github.com/golang/lint/golint - -script: - - go test -v --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - test -z "$(golint ./... | tee /dev/stderr)" - - go vet ./... - -os: - - linux - - osx - -notifications: - email: false diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS deleted file mode 100644 index 5ab5d41c547..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md deleted file mode 100644 index be4d7ea2c14..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md +++ /dev/null @@ -1,317 +0,0 @@ -# Changelog - -## v1.4.7 / 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## v1.4.2 / 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## v1.4.1 / 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## v1.4.0 / 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## v1.3.1 / 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## v1.3.0 / 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## v1.2.10 / 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## v1.2.9 / 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## v1.2.8 / 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test 
-* enable race detection for continuous integration (Linux, Mac, Windows) - -## v1.2.5 / 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## v1.2.1 / 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## v1.2.0 / 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. 
-* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 
2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md deleted file mode 100644 index 828a60b24ba..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. 
Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/gopkg.in/fsnotify.v1/LICENSE b/vendor/gopkg.in/fsnotify.v1/LICENSE deleted file mode 100644 index f21e5408009..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md deleted file mode 100644 index 3993207413a..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# File system notifications for Go - -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) - -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). 
- -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 - -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md - -## Related Projects - -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) - diff --git a/vendor/gopkg.in/fsnotify.v1/fen.go b/vendor/gopkg.in/fsnotify.v1/fen.go deleted file mode 100644 index ced39cb881e..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go deleted file mode 100644 index 190bf0de575..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/fsnotify.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. 
-type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go deleted file mode 100644 index d9fd1b88a05..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/inotify.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. 
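Editorial note: Add and Remove above are thin wrappers over the raw inotify syscalls. A minimal Linux-only sketch of the same add/remove cycle directly against golang.org/x/sys/unix (the watched path is a placeholder, and the mask is a subset of the agnosticEvents set used above):

```go
//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.InotifyInit1(unix.IN_CLOEXEC)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	mask := uint32(unix.IN_CREATE | unix.IN_MODIFY | unix.IN_DELETE)

	wd, err := unix.InotifyAddWatch(fd, "/tmp", mask) // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// Removing the watch invalidates wd; a later EINVAL means the kernel
	// already dropped it, as the comment in Remove above explains.
	if _, err := unix.InotifyRmWatch(fd, uint32(wd)); err != nil {
		log.Fatal(err)
	}
}
```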
- // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. 
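Editorial note: when the loop above sees IN_Q_OVERFLOW it can only deliver ErrEventOverflow, because an unknown number of events were dropped by the kernel. A common consumer-side response, sketched here against the replacement github.com/fsnotify/fsnotify API, is to re-list the watched directory and resynchronize; how the fresh listing is reconciled with application state is left open:

```go
package main

import (
	"errors"
	"log"
	"os"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// resyncOnOverflow drains the error channel and, after a queue overflow,
// re-lists dir, since any number of events may have been lost.
func resyncOnOverflow(errs <-chan error, dir string) {
	for err := range errs {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			entries, lerr := os.ReadDir(dir)
			if lerr != nil {
				log.Printf("resync failed: %v", lerr)
				continue
			}
			for _, e := range entries {
				log.Printf("resync: %s", filepath.Join(dir, e.Name()))
			}
			continue
		}
		log.Printf("watch error: %v", err)
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	dir := os.TempDir() // placeholder directory
	if err := w.Add(dir); err != nil {
		log.Fatal(err)
	}
	go func() {
		// Discard events; this sketch exercises only the error path.
		for range w.Events {
		}
	}()
	resyncOnOverflow(w.Errors, dir)
}
```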
-func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go deleted file mode 100644 index cc7db4b22ef..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. 
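Editorial note: the fdPoller above pairs epoll with a non-blocking pipe so that Close can interrupt a blocked wait (the "self-pipe" trick). A stripped-down Linux sketch of just that wakeup path, with no inotify descriptor involved:

```go
//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(epfd)

	// pipe[0] is the read end registered with epoll; writing a byte to
	// pipe[1] wakes any goroutine blocked in EpollWait, as wake() does.
	var pipe [2]int
	if err := unix.Pipe2(pipe[:], unix.O_NONBLOCK); err != nil {
		log.Fatal(err)
	}
	ev := unix.EpollEvent{Fd: int32(pipe[0]), Events: unix.EPOLLIN}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, pipe[0], &ev); err != nil {
		log.Fatal(err)
	}

	// Wake ourselves, then observe the readiness.
	if _, err := unix.Write(pipe[1], []byte{0}); err != nil {
		log.Fatal(err)
	}
	events := make([]unix.EpollEvent, 1)
	n, err := unix.EpollWait(epfd, events, -1)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("woken: %d event(s)", n)

	// Drain the pipe so the next wait blocks again, as clearWake() does.
	buf := make([]byte, 8)
	unix.Read(pipe[0], buf)
}
```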
- return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go deleted file mode 100644 index 86e76a3d676..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
- fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
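Editorial note: on the BSDs the registration performed by addWatch below boils down to a single kevent change list against an open file descriptor, since kqueue watches descriptors rather than paths. A minimal BSD/darwin-only sketch (the path is a placeholder; the darwin build above would use O_EVTONLY instead of O_RDONLY):

```go
//go:build darwin || freebsd || netbsd || openbsd || dragonfly

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(kq)

	// kqueue needs an open descriptor for the file being watched.
	fd, err := unix.Open("/tmp/watched", unix.O_RDONLY, 0) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	var change unix.Kevent_t
	unix.SetKevent(&change, fd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR)
	change.Fflags = unix.NOTE_WRITE | unix.NOTE_DELETE | unix.NOTE_RENAME

	// A single Kevent call both registers the change and waits for an
	// event (nil timeout blocks indefinitely, as read() below documents).
	events := make([]unix.Kevent_t, 1)
	n, err := unix.Kevent(kq, []unix.Kevent_t{change}, events, nil)
	if err != nil {
		log.Fatal(err)
	}
	if n > 0 {
		log.Printf("fflags: %#x", events[0].Fflags)
	}
}
```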
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. 
This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. 
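Editorial note: because kqueue only reports that a directory changed, not which entry changed, the create events described above are synthesized by re-listing the directory against a set of already-seen names (the fileExists map). A self-contained sketch of that diffing step, using os.ReadDir in place of the deprecated ioutil.ReadDir used above:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// newEntries returns paths in dir that are not yet in seen and records
// them, mirroring how sendDirectoryChangeEvents pairs a fresh directory
// listing with the fileExists map.
func newEntries(dir string, seen map[string]bool) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var created []string
	for _, e := range entries {
		p := filepath.Join(dir, e.Name())
		if !seen[p] {
			seen[p] = true
			created = append(created, p)
		}
	}
	return created, nil
}

func main() {
	seen := map[string]bool{}
	// The first listing seeds the set; later calls report only new names.
	paths, err := newEntries(os.TempDir(), seen)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("seeded %d entries\n", len(paths))
}
```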
-func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go deleted file mode 100644 index 7d8de14513e..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go deleted file mode 100644 index 9139e17161b..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY diff --git a/vendor/gopkg.in/fsnotify.v1/windows.go b/vendor/gopkg.in/fsnotify.v1/windows.go deleted file mode 100644 index 09436f31d82..00000000000 --- a/vendor/gopkg.in/fsnotify.v1/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
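Editorial note: on Windows, every mutation is funneled through the single I/O thread via a request struct carrying its own reply channel, which is why Add above builds an input value and blocks on in.reply. A generic, platform-neutral sketch of that request/reply pattern (all names here are illustrative, not from the original):

```go
package main

import "fmt"

// request mirrors the shape of the input struct above: an operation
// payload plus a private channel the worker answers on.
type request struct {
	path  string
	reply chan error
}

func main() {
	input := make(chan request)

	// The single worker owns all state, so no mutex is needed around it.
	go func() {
		watched := map[string]bool{}
		for req := range input {
			watched[req.path] = true
			req.reply <- nil // report success back to the caller
		}
	}()

	// Callers block on their private reply channel, exactly as Add and
	// Remove above block on in.reply.
	req := request{path: "/tmp/x", reply: make(chan error)}
	input <- req
	if err := <-req.reply; err != nil {
		fmt.Println("add failed:", err)
		return
	}
	fmt.Println("added")
}
```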
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
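Editorial note: the provisional constant above parks a not-yet-committed watch in the upper 32 bits of the 64-bit flag word, leaving the lower 32 bits for real notify flags, and addWatch strips it once startRead succeeds. A tiny sketch of that reserve-and-clear idiom (flag values are illustrative):

```go
package main

import "fmt"

const (
	// Lower 32 bits carry real notify flags; the upper bits are free for
	// internal bookkeeping, as with the provisional bit above.
	flagCreate  uint64 = 0x100
	provisional uint64 = 1 << 32
)

func main() {
	// Mark the watch provisional while registration is in flight.
	mask := flagCreate | provisional
	fmt.Printf("in flight: %#x\n", mask)

	// Once registration succeeds, strip the marker, keeping real flags.
	mask &^= provisional
	fmt.Printf("committed: %#x\n", mask)
}
```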
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/modules.txt b/vendor/modules.txt index cb19532f807..707d967878c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# capnproto.org/go/capnp/v3 v3.0.0-20220504220541-7329fad9c27b +# capnproto.org/go/capnp/v3 v3.0.0-alpha.3 ## explicit; go 1.18 capnproto.org/go/capnp/v3 capnproto.org/go/capnp/v3/encoding/text @@ -52,7 +52,7 @@ github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion -# github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 +# github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5 ## explicit; go 1.13 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool @@ -85,6 +85,8 @@ github.com/acarl005/stripansi # github.com/acomagu/bufpipe v1.0.3 ## explicit; go 1.12 github.com/acomagu/bufpipe +# github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be +## explicit; go 1.13 # github.com/aws/aws-sdk-go v1.43.30 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws @@ -145,13 +147,15 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.1.2 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2 +# github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0 ## explicit; go 1.15 github.com/checkpoint-restore/checkpointctl/lib # github.com/checkpoint-restore/go-criu/v5 v5.3.0 ## explicit; go 1.13 github.com/checkpoint-restore/go-criu/v5 +github.com/checkpoint-restore/go-criu/v5/magic github.com/checkpoint-restore/go-criu/v5/rpc +github.com/checkpoint-restore/go-criu/v5/stats # github.com/cheggaaa/pb/v3 v3.0.8 ## explicit; go 1.12 github.com/cheggaaa/pb/v3 @@ -169,7 +173,6 @@ github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link # github.com/container-orchestrated-devices/container-device-interface v0.4.0 ## explicit; go 1.17 -github.com/container-orchestrated-devices/container-device-interface/pkg github.com/container-orchestrated-devices/container-device-interface/pkg/cdi github.com/container-orchestrated-devices/container-device-interface/specs-go # github.com/containerd/cgroups 
v1.0.4 @@ -178,8 +181,8 @@ github.com/containerd/cgroups/stats/v1 # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/containerd v1.5.8 -## explicit; go 1.16 +# github.com/containerd/containerd v1.6.4 +## explicit; go 1.17 github.com/containerd/containerd/api/services/ttrpc/events/v1 github.com/containerd/containerd/api/types github.com/containerd/containerd/api/types/task @@ -187,6 +190,8 @@ github.com/containerd/containerd/cio github.com/containerd/containerd/defaults github.com/containerd/containerd/errdefs github.com/containerd/containerd/events +github.com/containerd/containerd/events/exchange +github.com/containerd/containerd/filters github.com/containerd/containerd/identifiers github.com/containerd/containerd/log github.com/containerd/containerd/namespaces @@ -195,9 +200,11 @@ github.com/containerd/containerd/pkg/cri/io github.com/containerd/containerd/pkg/cri/util github.com/containerd/containerd/pkg/dialer github.com/containerd/containerd/pkg/ioutil +github.com/containerd/containerd/pkg/shutdown github.com/containerd/containerd/pkg/ttrpcutil github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms +github.com/containerd/containerd/plugin github.com/containerd/containerd/reference/docker github.com/containerd/containerd/runtime/v2/shim github.com/containerd/containerd/runtime/v2/task @@ -213,7 +220,7 @@ github.com/containerd/fifo # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc -# github.com/containerd/stargz-snapshotter/estargz v0.10.1 +# github.com/containerd/stargz-snapshotter/estargz v0.11.4 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -238,8 +245,8 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.1.1 ## explicit; go 1.17 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.23.1 -## explicit; go 1.13 +# github.com/containers/buildah v1.26.1 +## explicit; go 1.16 github.com/containers/buildah github.com/containers/buildah/bind github.com/containers/buildah/chroot @@ -247,6 +254,9 @@ github.com/containers/buildah/copier github.com/containers/buildah/define github.com/containers/buildah/docker github.com/containers/buildah/imagebuildah +github.com/containers/buildah/internal +github.com/containers/buildah/internal/parse +github.com/containers/buildah/internal/util github.com/containers/buildah/pkg/blobcache github.com/containers/buildah/pkg/chrootuser github.com/containers/buildah/pkg/overlay @@ -254,19 +264,30 @@ github.com/containers/buildah/pkg/parse github.com/containers/buildah/pkg/rusage github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/util -# github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f -## explicit; go 1.15 +# github.com/containers/common v0.48.0 +## explicit; go 1.16 github.com/containers/common/libimage github.com/containers/common/libimage/manifests +github.com/containers/common/libnetwork/cni +github.com/containers/common/libnetwork/etchosts +github.com/containers/common/libnetwork/internal/util +github.com/containers/common/libnetwork/netavark +github.com/containers/common/libnetwork/network +github.com/containers/common/libnetwork/types +github.com/containers/common/libnetwork/util github.com/containers/common/pkg/apparmor github.com/containers/common/pkg/apparmor/internal/supported github.com/containers/common/pkg/capabilities 
+github.com/containers/common/pkg/cgroups github.com/containers/common/pkg/cgroupv2 github.com/containers/common/pkg/chown github.com/containers/common/pkg/config -github.com/containers/common/pkg/defaultnet +github.com/containers/common/pkg/download github.com/containers/common/pkg/filters +github.com/containers/common/pkg/flag +github.com/containers/common/pkg/machine github.com/containers/common/pkg/manifests +github.com/containers/common/pkg/netns github.com/containers/common/pkg/parse github.com/containers/common/pkg/retry github.com/containers/common/pkg/seccomp @@ -280,16 +301,17 @@ github.com/containers/common/pkg/supplemented github.com/containers/common/pkg/sysinfo github.com/containers/common/pkg/timetype github.com/containers/common/pkg/umask +github.com/containers/common/pkg/util github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/conmon-rs v0.0.0-20220505150146-7d0ae00b625c +# github.com/containers/conmon-rs v0.0.0-20220609131637-836ba11bf0aa ## explicit; go 1.18 github.com/containers/conmon-rs/internal/proto github.com/containers/conmon-rs/pkg/client -# github.com/containers/image/v5 v5.17.0 -## explicit; go 1.13 +# github.com/containers/image/v5 v5.21.1 +## explicit; go 1.16 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -302,14 +324,15 @@ github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image github.com/containers/image/v5/internal/blobinfocache +github.com/containers/image/v5/internal/imagedestination +github.com/containers/image/v5/internal/imagesource github.com/containers/image/v5/internal/iolimits -github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform +github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless github.com/containers/image/v5/internal/streamdigest github.com/containers/image/v5/internal/tmpdir -github.com/containers/image/v5/internal/types github.com/containers/image/v5/internal/uploadreader github.com/containers/image/v5/manifest github.com/containers/image/v5/oci/archive @@ -317,6 +340,7 @@ github.com/containers/image/v5/oci/internal github.com/containers/image/v5/oci/layout github.com/containers/image/v5/openshift github.com/containers/image/v5/ostree +github.com/containers/image/v5/pkg/blobcache github.com/containers/image/v5/pkg/blobinfocache github.com/containers/image/v5/pkg/blobinfocache/boltdb github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize @@ -330,6 +354,7 @@ github.com/containers/image/v5/pkg/shortnames github.com/containers/image/v5/pkg/strslice github.com/containers/image/v5/pkg/sysregistriesv2 github.com/containers/image/v5/pkg/tlsclientconfig +github.com/containers/image/v5/sif github.com/containers/image/v5/signature github.com/containers/image/v5/storage github.com/containers/image/v5/tarball @@ -337,7 +362,7 @@ github.com/containers/image/v5/transports github.com/containers/image/v5/transports/alltransports github.com/containers/image/v5/types github.com/containers/image/v5/version -# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b +# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a ## explicit github.com/containers/libtrust # github.com/containers/ocicrypt 
v1.1.5 @@ -356,63 +381,66 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d -## explicit; go 1.13 -github.com/containers/podman/v3/libpod -github.com/containers/podman/v3/libpod/define -github.com/containers/podman/v3/libpod/driver -github.com/containers/podman/v3/libpod/events -github.com/containers/podman/v3/libpod/layers -github.com/containers/podman/v3/libpod/linkmode -github.com/containers/podman/v3/libpod/lock -github.com/containers/podman/v3/libpod/lock/file -github.com/containers/podman/v3/libpod/lock/shm -github.com/containers/podman/v3/libpod/logs -github.com/containers/podman/v3/libpod/logs/reversereader -github.com/containers/podman/v3/libpod/network/cni -github.com/containers/podman/v3/libpod/network/types -github.com/containers/podman/v3/libpod/network/util -github.com/containers/podman/v3/libpod/plugin -github.com/containers/podman/v3/libpod/shutdown -github.com/containers/podman/v3/pkg/annotations -github.com/containers/podman/v3/pkg/cgroups -github.com/containers/podman/v3/pkg/checkpoint/crutils -github.com/containers/podman/v3/pkg/copy -github.com/containers/podman/v3/pkg/criu -github.com/containers/podman/v3/pkg/ctime -github.com/containers/podman/v3/pkg/domain/entities -github.com/containers/podman/v3/pkg/domain/entities/reports -github.com/containers/podman/v3/pkg/domain/entities/types -github.com/containers/podman/v3/pkg/env -github.com/containers/podman/v3/pkg/errorhandling -github.com/containers/podman/v3/pkg/hooks -github.com/containers/podman/v3/pkg/hooks/0.1.0 -github.com/containers/podman/v3/pkg/hooks/1.0.0 -github.com/containers/podman/v3/pkg/hooks/exec -github.com/containers/podman/v3/pkg/inspect -github.com/containers/podman/v3/pkg/kubeutils -github.com/containers/podman/v3/pkg/lookup -github.com/containers/podman/v3/pkg/namespaces -github.com/containers/podman/v3/pkg/netns -github.com/containers/podman/v3/pkg/parallel -github.com/containers/podman/v3/pkg/ps/define -github.com/containers/podman/v3/pkg/resolvconf -github.com/containers/podman/v3/pkg/resolvconf/dns -github.com/containers/podman/v3/pkg/rootless -github.com/containers/podman/v3/pkg/rootlessport -github.com/containers/podman/v3/pkg/seccomp -github.com/containers/podman/v3/pkg/selinux -github.com/containers/podman/v3/pkg/servicereaper -github.com/containers/podman/v3/pkg/signal -github.com/containers/podman/v3/pkg/specgen -github.com/containers/podman/v3/pkg/specgen/generate -github.com/containers/podman/v3/pkg/systemd -github.com/containers/podman/v3/pkg/timetype -github.com/containers/podman/v3/pkg/trust -github.com/containers/podman/v3/pkg/util -github.com/containers/podman/v3/utils -github.com/containers/podman/v3/version -# github.com/containers/psgo v1.7.1 +# github.com/containers/podman/v4 v4.1.0 +## explicit; go 1.16 +github.com/containers/podman/v4/cmd/podman/parse +github.com/containers/podman/v4/libpod +github.com/containers/podman/v4/libpod/define +github.com/containers/podman/v4/libpod/driver +github.com/containers/podman/v4/libpod/events +github.com/containers/podman/v4/libpod/layers +github.com/containers/podman/v4/libpod/linkmode +github.com/containers/podman/v4/libpod/lock +github.com/containers/podman/v4/libpod/lock/file +github.com/containers/podman/v4/libpod/lock/shm +github.com/containers/podman/v4/libpod/logs +github.com/containers/podman/v4/libpod/logs/reversereader 
+github.com/containers/podman/v4/libpod/plugin
+github.com/containers/podman/v4/libpod/shutdown
+github.com/containers/podman/v4/pkg/annotations
+github.com/containers/podman/v4/pkg/checkpoint/crutils
+github.com/containers/podman/v4/pkg/copy
+github.com/containers/podman/v4/pkg/criu
+github.com/containers/podman/v4/pkg/ctime
+github.com/containers/podman/v4/pkg/domain/entities
+github.com/containers/podman/v4/pkg/domain/entities/reports
+github.com/containers/podman/v4/pkg/domain/entities/types
+github.com/containers/podman/v4/pkg/env
+github.com/containers/podman/v4/pkg/errorhandling
+github.com/containers/podman/v4/pkg/hooks
+github.com/containers/podman/v4/pkg/hooks/0.1.0
+github.com/containers/podman/v4/pkg/hooks/1.0.0
+github.com/containers/podman/v4/pkg/hooks/exec
+github.com/containers/podman/v4/pkg/inspect
+github.com/containers/podman/v4/pkg/k8s.io/api/core/v1
+github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource
+github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1
+github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types
+github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr
+github.com/containers/podman/v4/pkg/kubeutils
+github.com/containers/podman/v4/pkg/lookup
+github.com/containers/podman/v4/pkg/namespaces
+github.com/containers/podman/v4/pkg/parallel
+github.com/containers/podman/v4/pkg/ps/define
+github.com/containers/podman/v4/pkg/resolvconf
+github.com/containers/podman/v4/pkg/resolvconf/dns
+github.com/containers/podman/v4/pkg/rootless
+github.com/containers/podman/v4/pkg/rootlessport
+github.com/containers/podman/v4/pkg/seccomp
+github.com/containers/podman/v4/pkg/selinux
+github.com/containers/podman/v4/pkg/servicereaper
+github.com/containers/podman/v4/pkg/signal
+github.com/containers/podman/v4/pkg/specgen
+github.com/containers/podman/v4/pkg/specgen/generate
+github.com/containers/podman/v4/pkg/specgenutil
+github.com/containers/podman/v4/pkg/systemd
+github.com/containers/podman/v4/pkg/systemd/define
+github.com/containers/podman/v4/pkg/timetype
+github.com/containers/podman/v4/pkg/trust
+github.com/containers/podman/v4/pkg/util
+github.com/containers/podman/v4/utils
+github.com/containers/podman/v4/version
+# github.com/containers/psgo v1.7.2
 ## explicit; go 1.14
 github.com/containers/psgo
 github.com/containers/psgo/internal/capabilities
@@ -421,8 +449,8 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.37.0
-## explicit; go 1.14
+# github.com/containers/storage v1.41.0
+## explicit; go 1.16
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -512,7 +540,7 @@ github.com/docker/distribution/registry/client/auth/challenge
 github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
-# github.com/docker/docker v20.10.12+incompatible
+# github.com/docker/docker v20.10.14+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -532,6 +560,7 @@ github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
+github.com/docker/docker/opts
 github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/fileutils
 github.com/docker/docker/pkg/homedir
@@ -548,15 +577,18 @@ github.com/docker/docker/pkg/system
 ## explicit; go 1.13
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
-# github.com/docker/go-connections v0.4.0
-## explicit
+# github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
+## explicit; go 1.13
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
+# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
+## explicit
+github.com/docker/go-events
 # github.com/docker/go-metrics v0.0.1
 ## explicit; go 1.11
 github.com/docker/go-metrics
-# github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc
+# github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651
 ## explicit
 github.com/docker/go-plugins-helpers/sdk
 github.com/docker/go-plugins-helpers/volume
@@ -565,6 +597,7 @@ github.com/docker/go-plugins-helpers/volume
 github.com/docker/go-units
 # github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
 ## explicit
+github.com/docker/libnetwork/ipamutils
 github.com/docker/libnetwork/resolvconf
 github.com/docker/libnetwork/resolvconf/dns
 github.com/docker/libnetwork/types
@@ -586,8 +619,8 @@ github.com/fatih/color
 # github.com/fsnotify/fsnotify v1.5.4
 ## explicit; go 1.16
 github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.7.4
-## explicit; go 1.16
+# github.com/fsouza/go-dockerclient v1.7.11
+## explicit; go 1.17
 github.com/fsouza/go-dockerclient
 # github.com/ghodss/yaml v1.0.0
 ## explicit
@@ -750,6 +783,9 @@ github.com/google/pprof/profile
 ## explicit; go 1.13
 github.com/google/renameio
 github.com/google/renameio/maybe
+# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
+## explicit; go 1.13
+github.com/google/shlex
 # github.com/google/uuid v1.3.0
 ## explicit
 github.com/google/uuid
@@ -780,13 +816,6 @@ github.com/hashicorp/go-multierror
 # github.com/hashicorp/go-retryablehttp v0.7.1
 ## explicit; go 1.13
 github.com/hashicorp/go-retryablehttp
-# github.com/hpcloud/tail v1.0.0
-## explicit
-github.com/hpcloud/tail
-github.com/hpcloud/tail/ratelimiter
-github.com/hpcloud/tail/util
-github.com/hpcloud/tail/watch
-github.com/hpcloud/tail/winfile
 # github.com/imdario/mergo v0.3.12
 ## explicit; go 1.13
 github.com/imdario/mergo
@@ -807,8 +836,8 @@ github.com/ishidawataru/sctp
 # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
 ## explicit
 github.com/jbenet/go-context/io
-# github.com/jinzhu/copier v0.3.2
-## explicit; go 1.15
+# github.com/jinzhu/copier v0.3.5
+## explicit; go 1.13
 github.com/jinzhu/copier
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
@@ -822,12 +851,13 @@ github.com/json-iterator/go
 # github.com/kevinburke/ssh_config v1.1.0
 ## explicit
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.14.2
-## explicit; go 1.15
+# github.com/klauspost/compress v1.15.4
+## explicit; go 1.16
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
@@ -886,7 +916,7 @@ github.com/moby/spdystream/spdy
 # github.com/moby/sys/mount v0.2.0
 ## explicit; go 1.14
 github.com/moby/sys/mount
-# github.com/moby/sys/mountinfo v0.6.0
+# github.com/moby/sys/mountinfo v0.6.1
 ## explicit; go 1.16
 github.com/moby/sys/mountinfo
 # github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
@@ -902,20 +932,22 @@ github.com/modern-go/reflect2
 # github.com/morikuni/aec v1.0.0
 ## explicit
 github.com/morikuni/aec
-# github.com/mtrmac/gpgme v0.1.2
-## explicit; go 1.11
-github.com/mtrmac/gpgme
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
 # github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481
 ## explicit
 github.com/nozzle/throttler
+# github.com/nxadm/tail v1.4.8
+## explicit; go 1.13
+github.com/nxadm/tail
+github.com/nxadm/tail/ratelimiter
+github.com/nxadm/tail/util
+github.com/nxadm/tail/watch
+github.com/nxadm/tail/winfile
 # github.com/olekukonko/tablewriter v0.0.5
 ## explicit; go 1.12
 github.com/olekukonko/tablewriter
-# github.com/onsi/ginkgo v1.16.5
-## explicit; go 1.16
 # github.com/onsi/ginkgo/v2 v2.1.4
 ## explicit; go 1.18
 github.com/onsi/ginkgo/v2
@@ -957,7 +989,7 @@ github.com/opencontainers/go-digest
 ## explicit; go 1.11
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.2
+# github.com/opencontainers/runc v1.1.3
 ## explicit; go 1.16
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
@@ -973,11 +1005,11 @@ github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
 github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.0.3-0.20210709190330-896175883324
+# github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/runtime-tools v0.9.1-0.20210326182921-59cdde06764b
-## explicit
+# github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f
+## explicit; go 1.16
 github.com/opencontainers/runtime-tools/error
 github.com/opencontainers/runtime-tools/filepath
 github.com/opencontainers/runtime-tools/generate
@@ -990,14 +1022,14 @@ github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
-## explicit
+# github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805
+## explicit; go 1.16
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/command
 github.com/openshift/imagebuilder/dockerfile/parser
 github.com/openshift/imagebuilder/signal
 github.com/openshift/imagebuilder/strslice
-# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
+# github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
 ## explicit
 github.com/ostreedev/ostree-go/pkg/glibobject
 github.com/ostreedev/ostree-go/pkg/otbuiltin
@@ -1021,6 +1053,9 @@ github.com/pmezard/go-difflib/difflib
 # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c
 ## explicit; go 1.14
 github.com/power-devops/perfstat
+# github.com/proglottis/gpgme v0.1.1
+## explicit; go 1.11
+github.com/proglottis/gpgme
 # github.com/prometheus/client_golang v1.12.2
 ## explicit; go 1.13
 github.com/prometheus/client_golang/prometheus
@@ -1047,20 +1082,6 @@ github.com/psampaz/go-mod-outdated/internal/runner
 # github.com/rivo/uniseg v0.2.0
 ## explicit; go 1.12
 github.com/rivo/uniseg
-# github.com/rootless-containers/rootlesskit v0.14.5
-## explicit; go 1.14
-github.com/rootless-containers/rootlesskit/pkg/api
-github.com/rootless-containers/rootlesskit/pkg/msgutil
-github.com/rootless-containers/rootlesskit/pkg/port
-github.com/rootless-containers/rootlesskit/pkg/port/builtin
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/child
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/msg
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/opaque
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp
-github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
-github.com/rootless-containers/rootlesskit/pkg/port/portutil
 # github.com/russross/blackfriday v1.6.0
 ## explicit; go 1.13
 github.com/russross/blackfriday
@@ -1070,7 +1091,7 @@ github.com/russross/blackfriday/v2
 # github.com/saschagrunert/go-modiff v1.3.0
 ## explicit; go 1.15
 github.com/saschagrunert/go-modiff/pkg/modiff
-# github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921
+# github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646
 ## explicit; go 1.14
 github.com/seccomp/libseccomp-golang
 # github.com/sergi/go-diff v1.2.0
@@ -1105,6 +1126,9 @@ github.com/stefanberger/go-pkcs11uri
 # github.com/stretchr/testify v1.7.2
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
+# github.com/sylabs/sif/v2 v2.7.0
+## explicit; go 1.17
+github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit
 github.com/syndtr/gocapability/capability
@@ -1125,13 +1149,13 @@ github.com/urfave/cli/v2
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v7 v7.1.5
+# github.com/vbauerster/mpb/v7 v7.4.1
 ## explicit; go 1.14
 github.com/vbauerster/mpb/v7
 github.com/vbauerster/mpb/v7/cwriter
 github.com/vbauerster/mpb/v7/decor
 github.com/vbauerster/mpb/v7/internal
-# github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54
+# github.com/vishvananda/netlink v1.2.1-beta.2
 ## explicit; go 1.12
 github.com/vishvananda/netlink
 github.com/vishvananda/netlink/nl
@@ -1235,14 +1259,13 @@ golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-golang.org/x/crypto/ssh/terminal
 # golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
 ## explicit; go 1.17
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
+# golang.org/x/net v0.0.0-20220607020251-c690dde0001d
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -1262,11 +1285,11 @@ golang.org/x/net/websocket
 ## explicit; go 1.11
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
+# golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 ## explicit
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
+# golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68
 ## explicit; go 1.17
 golang.org/x/sys/cpu
 golang.org/x/sys/execabs
@@ -1423,9 +1446,6 @@ google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/timestamppb
 google.golang.org/protobuf/types/known/wrapperspb
-# gopkg.in/fsnotify.v1 v1.4.7
-## explicit
-gopkg.in/fsnotify.v1
 # gopkg.in/inf.v0 v0.9.1
 ## explicit
 gopkg.in/inf.v0