diff --git a/go.mod b/go.mod
index 7d896ab4e93..483f0de15ab 100644
--- a/go.mod
+++ b/go.mod
@@ -4,34 +4,34 @@ module github.com/cri-o/cri-o
 require (
 	github.com/BurntSushi/toml v1.1.0
-	github.com/Microsoft/go-winio v0.5.1
+	github.com/Microsoft/go-winio v0.5.2
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/container-orchestrated-devices/container-device-interface v0.3.2
-	github.com/containerd/cgroups v1.0.2
-	github.com/containerd/containerd v1.5.8
+	github.com/containerd/cgroups v1.0.3
+	github.com/containerd/containerd v1.6.2
 	github.com/containerd/cri-containerd v1.19.0
 	github.com/containerd/fifo v1.0.0
 	github.com/containerd/ttrpc v1.1.0
 	github.com/containerd/typeurl v1.0.2
 	github.com/containernetworking/cni v1.1.0
 	github.com/containernetworking/plugins v1.1.1
-	github.com/containers/buildah v1.23.1
-	github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f
+	github.com/containers/buildah v1.25.1
+	github.com/containers/common v0.47.5
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.17.0
-	github.com/containers/ocicrypt v1.1.2
+	github.com/containers/image/v5 v5.20.0
+	github.com/containers/ocicrypt v1.1.3
 	github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d
-	github.com/containers/storage v1.37.0
+	github.com/containers/storage v1.39.0
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/cpuguy83/go-md2man v1.0.10
 	github.com/creack/pty v1.1.17
 	github.com/cri-o/ocicni v0.3.1
 	github.com/cyphar/filepath-securejoin v0.2.3
-	github.com/docker/distribution v2.7.1+incompatible
+	github.com/docker/distribution v2.8.1+incompatible
 	github.com/docker/go-units v0.4.0
 	github.com/emicklei/go-restful v2.15.0+incompatible
 	github.com/fsnotify/fsnotify v1.5.4
-	github.com/go-logr/logr v1.2.1
+	github.com/go-logr/logr v1.2.2
 	github.com/go-zoo/bone v1.3.0
 	github.com/godbus/dbus/v5 v5.0.6
 	github.com/gogo/protobuf v1.3.2
@@ -44,32 +44,32 @@ require (
 	github.com/onsi/ginkgo/v2 v2.1.4
 	github.com/onsi/gomega v1.19.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
-	github.com/opencontainers/runc v1.0.3
+	github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
+	github.com/opencontainers/runc v1.1.0
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210709190330-896175883324
 	github.com/opencontainers/runtime-tools v0.9.1-0.20210326182921-59cdde06764b
-	github.com/opencontainers/selinux v1.9.1
+	github.com/opencontainers/selinux v1.10.0
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.11.0
+	github.com/prometheus/client_golang v1.11.1
 	github.com/psampaz/go-mod-outdated v0.8.0
 	github.com/sirupsen/logrus v1.8.1
 	github.com/soheilhy/cmux v0.1.5
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.7.1
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 	github.com/urfave/cli/v2 v2.3.0
 	github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0
 	go.opentelemetry.io/otel v1.3.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0
-	go.opentelemetry.io/otel/sdk v1.2.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0
+	go.opentelemetry.io/otel/sdk v1.3.0
 	golang.org/x/net v0.0.0-20220225172249-27dd8689420f
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
 	google.golang.org/grpc v1.43.0
-	k8s.io/api v0.22.2
-	k8s.io/apimachinery v0.22.2
-	k8s.io/client-go v0.22.2
-	k8s.io/cri-api v0.23.0-alpha.3
+	k8s.io/api v0.22.5
+	k8s.io/apimachinery v0.22.5
+	k8s.io/client-go v0.22.5
+	k8s.io/cri-api v0.23.1
 	k8s.io/klog/v2 v2.30.0
 	k8s.io/kubernetes v1.23.0
 	k8s.io/release v0.8.0
@@ -82,59 +82,61 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/Microsoft/hcsshim v0.8.23 // indirect
+	github.com/Microsoft/hcsshim v0.9.2 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f // indirect
 	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+	github.com/acomagu/bufpipe v1.0.3 // indirect
 	github.com/aws/aws-sdk-go v1.38.49 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bits-and-blooms/bitset v1.2.0 // indirect
-	github.com/cenkalti/backoff/v4 v4.1.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2 // indirect
-	github.com/checkpoint-restore/go-criu/v5 v5.1.0 // indirect
+	github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect
 	github.com/cheggaaa/pb/v3 v3.0.5 // indirect
 	github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect
-	github.com/cilium/ebpf v0.6.2 // indirect
-	github.com/containerd/console v1.0.2 // indirect
+	github.com/cilium/ebpf v0.7.0 // indirect
+	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.9.0 // indirect
-	github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.11.3 // indirect
+	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/containers/psgo v1.7.1 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
-	github.com/docker/docker v20.10.11+incompatible // indirect
+	github.com/docker/docker v20.10.14+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.6.4 // indirect
-	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
+	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/go-plugins-helpers v0.0.0-20200102110956-c9a8a2d92ccc // indirect
 	github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 // indirect
 	github.com/emirpasic/gods v1.12.0 // indirect
-	github.com/fatih/color v1.9.0 // indirect
-	github.com/fsouza/go-dockerclient v1.7.4 // indirect
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/fsouza/go-dockerclient v1.7.10 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-git/gcfg v1.5.0 // indirect
-	github.com/go-git/go-billy/v5 v5.0.0 // indirect
-	github.com/go-git/go-git/v5 v5.2.0 // indirect
-	github.com/go-logr/stdr v1.2.0 // indirect
+	github.com/go-git/go-billy/v5 v5.3.1 // indirect
+	github.com/go-git/go-git/v5 v5.4.2 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.2.4 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
-	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/go-cmp v0.5.7 // indirect
 	github.com/google/go-github/v33 v33.0.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/go-querystring v1.0.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
+	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
 	github.com/googleapis/gnostic v0.5.5 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-retryablehttp v0.6.4 // indirect
 	github.com/hpcloud/tail v1.0.0 // indirect
@@ -142,80 +144,81 @@ require (
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
-	github.com/jinzhu/copier v0.3.2 // indirect
+	github.com/jinzhu/copier v0.3.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect
-	github.com/klauspost/compress v1.13.6 // indirect
+	github.com/kevinburke/ssh_config v1.1.0 // indirect
+	github.com/klauspost/compress v1.15.1 // indirect
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/lithammer/dedent v1.1.0 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
-	github.com/mattn/go-colorable v0.1.8 // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
-	github.com/miekg/pkcs11 v1.0.3 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
-	github.com/mitchellh/mapstructure v1.4.2 // indirect
+	github.com/mitchellh/mapstructure v1.4.3 // indirect
 	github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/moby/sys/mount v0.2.0 // indirect
-	github.com/moby/sys/mountinfo v0.4.1 // indirect
+	github.com/moby/sys/mountinfo v0.6.0 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/mtrmac/gpgme v0.1.2 // indirect
 	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/onsi/ginkgo v1.16.5 // indirect
-	github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 // indirect
-	github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 // indirect
+	github.com/openshift/imagebuilder v1.2.3 // indirect
+	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/proglottis/gpgme v0.1.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.30.0 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/rootless-containers/rootlesskit v0.14.5 // indirect
-	github.com/russross/blackfriday v1.5.2 // indirect
+	github.com/russross/blackfriday v1.6.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/saschagrunert/go-modiff v1.3.0 // indirect
-	github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf // indirect
-	github.com/sergi/go-diff v1.1.0 // indirect
+	github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 // indirect
+	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/shirou/gopsutil/v3 v3.20.12 // indirect
-	github.com/spf13/cobra v1.2.1 // indirect
+	github.com/spf13/cobra v1.4.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/spiegel-im-spiegel/errs v1.0.2 // indirect
 	github.com/spiegel-im-spiegel/go-cvss v0.4.0 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
+	github.com/sylabs/sif/v2 v2.3.2 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.1.5 // indirect
+	github.com/vbauerster/mpb/v7 v7.3.2 // indirect
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
 	github.com/xanzy/go-gitlab v0.43.0 // indirect
-	github.com/xanzy/ssh-agent v0.2.1 // indirect
-	github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
+	github.com/xanzy/ssh-agent v0.3.1 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 // indirect
 	go.opentelemetry.io/otel/trace v1.3.0 // indirect
-	go.opentelemetry.io/proto/otlp v0.10.0 // indirect
-	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
+	go.opentelemetry.io/proto/otlp v0.11.0 // indirect
+	golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
-	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
 	golang.org/x/tools v0.1.10 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20211005153810-c76a74d43a8e // indirect
+	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/fsnotify.v1 v1.4.7 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -224,9 +227,9 @@ require (
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/apiserver v0.20.6 // indirect
+	k8s.io/apiserver v0.22.5 // indirect
 	k8s.io/cloud-provider v0.0.0 // indirect
-	k8s.io/component-base v0.20.6 // indirect
+	k8s.io/component-base v0.22.5 // indirect
 	k8s.io/component-helpers v0.0.0 // indirect
 	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
 	k8s.io/mount-utils v0.0.0 // indirect
diff --git a/go.sum b/go.sum
index cdd67215bea..3a7be3af0e3 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,5 @@
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -22,8 +23,17 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
 cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -33,6 +43,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
 cloud.google.com/go/logging v1.1.2/go.mod h1:KrljuAHIw631j9+QXsnq9vDwsrwmdxfGpivMR68M7DY=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= @@ -49,6 +60,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -85,9 +97,11 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= github.com/GoogleCloudPlatform/testgrid v0.0.38/go.mod h1:KGQ3wqPuX1xCqeSHvcGmAafuMtV7cRNST9hZS7VCahA= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= @@ -102,8 +116,9 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -115,14 +130,20 @@ github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.21/go.mod 
h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= -github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f h1:J2FzIrXN82q5uyUraeJpLIm7U6PffRwje2ORho5yIik= +github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= @@ -134,8 +155,9 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -153,7 +175,9 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -175,7 +199,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= @@ -183,6 +206,7 @@ github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= @@ -198,9 +222,11 @@ github.com/carolynvs/magex v0.5.0/go.mod h1:i4bMWEmwGw2+W/u/0cEs5FTEyLtaN5DEKvVL github.com/carolynvs/magex v0.6.0/go.mod h1:hqaEkr9TAv+kFb/5wgDiTdszF13rpe0Q+bWHmTe6N74= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= 
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -214,8 +240,9 @@ github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2/g github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.1.0 h1:BkVH17kcwgmKMnFArsvLrxuBbMwfvPNYRB7mfJ0lzyI= github.com/checkpoint-restore/go-criu/v5 v5.1.0/go.mod h1:iaS8bb7p6zKJanp1Qe8mpl7+bnkYBR500psJR6mwma0= +github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE= github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -229,8 +256,11 @@ github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvso github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -240,7 +270,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= @@ -262,15 +294,16 @@ 
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.2 h1:mZBclaSgNDfPWtfhj2xJY28LZ9nYIgzB0pwSURPl6JM= -github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY= +github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -288,16 +321,20 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU= +github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod 
h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= github.com/containerd/cri-containerd v1.19.0 h1:PcTvvl+SHaekCMQZFQkYjn1RKlYrK6khYbuhOeF68k0= github.com/containerd/cri-containerd v1.19.0/go.mod h1:wxbGdReWGCalzGOEpifoHeYCK4xAgnj4o/4bVB+9voU= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -309,6 +346,8 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -319,13 +358,17 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU= -github.com/containerd/stargz-snapshotter/estargz v0.9.0 h1:PkB6BSTfOKX23erT2GkoUKkJEcXfNcyKskIViK770v8= github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= +github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac= +github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M= +github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= 
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -354,25 +397,30 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE= github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= -github.com/containers/buildah v1.23.1 h1:Tpc9DsRuU+0Oofewpxb6OJVNQjCu7yloN/obUqzfDTY= github.com/containers/buildah v1.23.1/go.mod h1:4WnrN0yrA7ab0ppgunixu2WM1rlD2rG8QLJAKbEkZlQ= +github.com/containers/buildah v1.25.1 h1:X3Mong70pw2ac8j9SwTnO03CRmaq0Zy7LXegaYWG4Yw= +github.com/containers/buildah v1.25.1/go.mod h1:Q8mkvdtBL6c12UzH4dMbCQ1iEPkPdKJokPe8zlbZvLQ= github.com/containers/common v0.44.2/go.mod h1:7sdP4vmI5Bm6FPFxb3lvAh1Iktb6tiO1MzjUzhxdoGo= -github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f h1:vVmx51AzWvB4/ao2zyR6s053a1leLTOh+zsOPVWQRgA= github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f/go.mod h1:aml/OO4FmYfPbfT87rvWiCgkLzTdqO6PuZ/xXq6bPbk= +github.com/containers/common v0.47.5 h1:Qm9o+wVPO9sbggTKubN3xYMtPRaPv7dmcrJQgongHHw= +github.com/containers/common v0.47.5/go.mod h1:HgX0mFXyB0Tbe2REEIp9x9CxET6iSzmHfwR6S/t2LZc= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/image/v5 v5.10.4/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII= github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4= -github.com/containers/image/v5 v5.17.0 h1:KS5pro80CCsSp5qDBTMmSAWQo+xcBX19zUPExmYX2OQ= -github.com/containers/image/v5 v5.17.0/go.mod h1:GnYVusVRFPMMTAAUkrcS8NNSpBp8oyrjOUe04AAmRr4= -github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= +github.com/containers/image/v5 v5.19.1/go.mod h1:ewoo3u+TpJvGmsz64XgzbyTHwHtM94q7mgK/pX+v2SE= +github.com/containers/image/v5 v5.20.0 h1:BYFMRvYqmEHnHo0sjTbnLbj0fzkGLDx6P57lszm30B4= +github.com/containers/image/v5 v5.20.0/go.mod h1:5UL1ooih6+USVYXk19r8ScQNsbTprhlJxrHezAu4OVE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0= github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.3 
h1:uMxn2wTb4nDR7GqG3rnZSfpJXqWURfzZ7nKydzIeKpA= +github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d h1:cztBXICnZeM0Cz/7IzbLbMs8jYCnJmil94NJszFYNXA= github.com/containers/podman/v3 v3.2.0-rc1.0.20211005134800-8bcc086b1b9d/go.mod h1:RvfQgiFLco33brSGNga2tLZifqKiUu8n6OxnjDTPUb4= github.com/containers/psgo v1.7.1 h1:2N6KADeFvBm1aI2iXxu6+/Xh7CCkdh8p8F3F/cpIU5I= @@ -381,8 +429,10 @@ github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3E github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20= github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8= -github.com/containers/storage v1.37.0 h1:HVhDsur6sx889ZIZ1d1kEiOzv3gsr5q0diX2VZmOdSg= github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= +github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ= +github.com/containers/storage v1.39.0 h1:NV93CVx6KAQ04cldeJyqa7uDZivhmO3rXla1cyn75dk= +github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs= github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -408,8 +458,9 @@ github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3/go.mod h github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -442,21 +493,27 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.3-0.20220208084023-a5c757555091+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo= -github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.14+incompatible h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w= +github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= +github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -492,7 +549,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -501,8 +560,9 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -516,8 +576,10 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsouza/go-dockerclient v1.7.4 h1:daYb0km2a91aNt2KTc4AEcTwgExYtQXHhkt5mjdRD1o= github.com/fsouza/go-dockerclient v1.7.4/go.mod h1:het+LPt7NaTEVGgwXJAKxPn77RZrQKb2EXJb4e+BHv0= +github.com/fsouza/go-dockerclient v1.7.7/go.mod h1:njNCXvoZj3sLPjf3yO0DPHf1mdLdCPDYPc14GskKA4Y= +github.com/fsouza/go-dockerclient v1.7.10 h1:KIda66AP88BWQpyg+8ve9LQmn1ZZ/usCbmxeBoMth3U= +github.com/fsouza/go-dockerclient v1.7.10/go.mod h1:rdD3Eq3rHwMA8p/xrn+gLb+3ov7uRJGVkV1HsUFY39A= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -532,12 +594,16 @@ github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= +github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= +github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= 
+github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= +github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= +github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -552,10 +618,12 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.1 h1:DX7uPQ4WgAWfoh+NGGlbJQswnYIVvz0SRlLS3rPZQDA= github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.0 h1:j4LrlVXgrbIWO83mmQUnK0Hi+YnbD+vzrE1z/EphbFE= +github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= @@ -685,9 +753,11 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-containerregistry v0.3.0/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-github/v33 v33.0.0 h1:qAf9yP0qc54ufQxzwv+u9H0tiVOnPJxo0lI/JXqw3ZM= github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= @@ -707,6 +777,7 @@ github.com/google/licenseclassifier/v2 v2.0.0-alpha.1/go.mod h1:YAgBGGTeNDMU+WfI github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -720,8 +791,11 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU= github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= @@ -736,6 +810,8 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= @@ -762,25 +838,34 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -789,11 +874,18 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= 
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs= @@ -801,6 +893,7 @@ github.com/hexdigest/gowrap v1.1.8/go.mod h1:H/JiFmQMp//tedlV8qt2xBdGzmne6bpbaSu github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -825,9 +918,11 @@ github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbB github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= -github.com/jinzhu/copier v0.3.2 h1:QdBOCbaouLDYaIPFfi1bKv5F5tPpeTwXe4sD0jqtz5w= github.com/jinzhu/copier v0.3.2/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -849,6 +944,7 @@ github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqo github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go 
v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -862,8 +958,10 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/kaey/framebuffer v0.0.0-20140402104929-7b385489a1ff/go.mod h1:tS4qtlcKqtt3tCIHUflVSqeP3CLH5Qtv2szX9X2SyhU= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.1.0 h1:pH/t1WS9NzT8go394IqZeJTMHVm6Cr6ZJ6AQ+mdNo/o= +github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -879,8 +977,11 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -905,13 +1006,16 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/logrusorgru/aurora 
v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.12.1/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -925,12 +1029,17 @@ github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GW github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -947,6 +1056,7 @@ github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRC github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= @@ -968,13 +1078,17 @@ github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZ github.com/mdlayher/raw 
v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -986,8 +1100,9 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= @@ -999,9 +1114,13 @@ github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/mountinfo v0.6.0 h1:gUDhXQx58YNrpHlK4nSL+7y2pxFZkUcXqzFDKWdC0Oo= 
+github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -1021,7 +1140,6 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s= github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1059,6 +1177,7 @@ github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISq github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= @@ -1076,6 +1195,7 @@ github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQ github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1088,8 +1208,11 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20200206005212-79b036d80240/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 h1:TVzvdjOalkJBNkbpPVMAr4KV9QRf2IjfxdyxwAK78Gs= 
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 h1:g47eG1u/gw0JB7mZ88TcHKCmsy7sWUNZD8ZS9Jhi0O8= +github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1098,8 +1221,9 @@ github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT5 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1120,22 +1244,27 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= -github.com/opencontainers/selinux v1.9.1 h1:b4VPEF3O5JLZgdTDBmGepaaIbAo0GqoF6EBRq5f/g3Y= github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4= +github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656/go.mod h1:9aJRczxCH0mvT6XQ+5STAQaPWz7OsWcU5/mRkt8IWeo= +github.com/openshift/imagebuilder v1.2.3 h1:jvA7mESJdclRKkTe3Yl6UWlliFNVW6mLY8RI+Rrfhfo= +github.com/openshift/imagebuilder v1.2.3/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orangecms/go-framebuffer v0.0.0-20200613202404-a0700d90c330/go.mod 
h1:3Myb/UszJY32F2G7yGkUtcW/ejHpjlGfYLim7cv2uKA= -github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= +github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/getopt/v2 v2.1.0/go.mod h1:4NtW75ny4eBw9fO1bhtNdYTlZKYX5/tBLtsOpwKIKd0= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -1152,17 +1281,22 @@ github.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiK github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ= +github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod 
h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1174,6 +1308,7 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -1213,8 +1348,9 @@ github.com/rogpeppe/go-internal v1.8.1-0.20210923151022-86f73c517451/go.mod h1:J github.com/rootless-containers/rootlesskit v0.14.5 h1:X4eNt2e1h/uSjlssKqpeTY5fatrjDz9F9FX05RJB7Tw= github.com/rootless-containers/rootlesskit v0.14.5/go.mod h1:Ai3detLzryb/4EkzXmNfh8aByUcBXp/qqkQusJs1SO8= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1223,6 +1359,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/saschagrunert/ccli v1.0.2-0.20200423111659-b68f755cc0f5/go.mod h1:nF6F8YjOZIYqRQ2GVOi43O13vaN2W1OQq1LcxUGdAfw= github.com/saschagrunert/go-modiff v1.3.0 h1:K/CJGaWH8poLrg/NpNQzHU60T/FiFtgZl2w8JhZequA= github.com/saschagrunert/go-modiff v1.3.0/go.mod h1:SSVmjiafeH2c1ITdPSmVDJ4/plhYJteb2VytMaD5/ZE= @@ -1231,15 +1368,19 @@ github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sclevine/spec v1.4.0/go.mod 
h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0= github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= github.com/sendgrid/rest v2.6.2+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= github.com/sendgrid/sendgrid-go v3.7.2+incompatible/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada h1:WokF3GuxBeL+n4Lk4Fa8v9mbdjlrl7bHuneF4N1bk2I= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/gopsutil/v3 v3.20.12 h1:abpcjSQRHdb3thCge/UyJty9CnvvmUHljTSrjtFU+Og= @@ -1270,18 +1411,22 @@ github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34c github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod 
h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1294,6 +1439,7 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spiegel-im-spiegel/errs v1.0.2 h1:v4amEwRDqRWjKHOILQnJSovYhZ4ZttEnBBXNXEzS6Sc= github.com/spiegel-im-spiegel/errs v1.0.2/go.mod h1:UoasJYYujMcdkbT9USv8dfZWoMyaY3btqQxoLJImw0A= github.com/spiegel-im-spiegel/go-cvss v0.4.0 h1:A+S9I01wkiEo6e66xFbxtz9LfXdsGUr5FppYozUAmmI= @@ -1313,9 +1459,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo= +github.com/sylabs/sif/v2 v2.3.1/go.mod h1:NnvveH62GiibimL00MrI6YYcZfb7DnZMcRo/40giY+0= +github.com/sylabs/sif/v2 v2.3.2 h1:Kj60dUcE3TSM8Px4TaIbX7PUafB1QGhUi70Fz5Gf7iU= +github.com/sylabs/sif/v2 v2.3.2/go.mod h1:IrLX2pzmQ2O4qgv5iy3HdKJcBNYds9DTMd9Je8A9tX4= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -1328,6 +1479,8 @@ github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiff github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod 
h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/twitchtv/twirp v5.8.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= github.com/u-root/gobusybox/src v0.0.0-20220120221406-4e2fbb8f41bf/go.mod h1:n6S68wL67GT3KLZWmLzrJCr1rMLQ6xzA9rv2k46kF2Q= github.com/u-root/iscsinl v0.1.1-0.20210528121423-84c32645822a/go.mod h1:RWIgJWqm9/0gjBZ0Hl8iR6MVGzZ+yAda2uqqLmetE2I= @@ -1366,8 +1519,8 @@ github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8J github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM= github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c= github.com/vbauerster/mpb/v7 v7.1.4/go.mod h1:4zulrZfvshMOnd2APiHgWS9Yrw08AzZVRr9G11tkpcQ= -github.com/vbauerster/mpb/v7 v7.1.5 h1:vtUEUfQHmNeJETyF4AcRCOV6RC4wqFwNORy52UMXPbQ= -github.com/vbauerster/mpb/v7 v7.1.5/go.mod h1:4M8+qAoQqV60WDNktBM5k05i1iTrXE7rjKOHEVkVlec= +github.com/vbauerster/mpb/v7 v7.3.2 h1:tCuxMy8G9cLdjb61b6wO7I1vRT/LyMEzRbr3xCC0JPU= +github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= @@ -1387,11 +1540,14 @@ github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/go-gitlab v0.43.0 h1:rpOZQjxVJGW/ch+Jy4j7W4o7BB1mxkXJNVGuplZ7PUs= github.com/xanzy/go-gitlab v0.43.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/xanzy/ssh-agent v0.3.1 h1:AmzO1SSWxw73zxFZPRwaMN1MohDw8UyHnmuxyceTEGo= +github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= @@ -1418,8 +1574,11 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd/api/v3 v3.5.0/go.mod 
h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= @@ -1441,33 +1600,36 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.2 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 h1:xzbcGykysUh776gzD1LUPsNNHKWN0kQWDnJhn1ddUuk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0/go.mod h1:14T5gr+Y6s2AgHPqBMgnGwp04csUjQmYXFWPeiBoq5s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0 h1:VsgsSCDwOSuO8eMVh63Cd4nACMqgjpmAeJSIvVNneD0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0/go.mod h1:9mLBBnPRf3sf+ASVH2p9xREXVBvwib02FxcKnavtExg= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= -go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod 
h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.10.0 h1:n7brgtEbDvXEgGyKKo8SobKT1e9FewlDtXzkVP5djoE= -go.opentelemetry.io/proto/otlp v0.10.0/go.mod h1:zG20xCK0szZ1xdokeSOwEcmlXu+x9kkdRe6N1DhKcfU= +go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1486,6 +1648,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1494,10 +1657,12 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 
h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1528,6 +1693,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= @@ -1542,6 +1708,7 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= @@ -1601,16 +1768,21 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1629,8 +1801,12 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1679,6 +1855,7 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1739,6 +1916,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1748,27 +1926,38 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210925032602-92d5a993a665/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1823,6 +2012,7 @@ golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDq golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1876,6 +2066,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= @@ -1919,6 +2110,17 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= 
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1956,12 +2158,16 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1994,6 +2200,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod 
h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -2036,6 +2243,7 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index 689e4da6bda..2342a7fcd6f 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -113,6 +113,69 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta return hdr } +// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file +// from the tar header and returns the security descriptor into a byte slice. +func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { + // Maintaining old SDDL-based behavior for backward + // compatibility. All new tar headers written by this library + // will have raw binary for the security descriptor. + var sd []byte + var err error + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + sd, err = winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return nil, err + } + } + if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { + sd, err = base64.StdEncoding.DecodeString(sdraw) + if err != nil { + return nil, err + } + } + return sd, nil +} + +// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the +// current file from the tar header and returns it as a byte slice. +func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { + var eas []winio.ExtendedAttribute + var eadata []byte + var err error + for k, v := range hdr.PAXRecords { + if !strings.HasPrefix(k, hdrEaPrefix) { + continue + } + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return nil, err + } + eas = append(eas, winio.ExtendedAttribute{ + Name: k[len(hdrEaPrefix):], + Value: data, + }) + } + if len(eas) != 0 { + eadata, err = winio.EncodeExtendedAttributes(eas) + if err != nil { + return nil, err + } + } + return eadata, nil +} + +// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header +// and encodes it into a byte slice. The file for which this function is called must be a +// symlink. +func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { + _, isMountPoint := hdr.PAXRecords[hdrMountPoint] + rp := winio.ReparsePoint{ + Target: filepath.FromSlash(hdr.Linkname), + IsMountPoint: isMountPoint, + } + return winio.EncodeReparsePoint(&rp) +} + // WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. // // This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. 
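The three helpers added above (SecurityDescriptorFromTarHeader, ExtendedAttributesFromTarHeader, EncodeReparsePointFromTarHeader) expose the Win32 metadata stored in a tar header as standalone calls, which the refactored WriteBackupStreamFromTarFile further down now reuses. A minimal usage sketch, not part of this patch: the archive name is hypothetical, the loop is illustrative only, and backuptar builds only for Windows targets.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	f, err := os.Open("layer.tar") // hypothetical archive carrying MSWINDOWS.* PAX records
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}

		// Security descriptor, taken from the raw binary PAX record or the legacy SDDL record.
		sd, err := backuptar.SecurityDescriptorFromTarHeader(hdr)
		if err != nil {
			log.Fatal(err)
		}

		// Extended attributes, re-encoded into the BackupEaData wire format.
		eas, err := backuptar.ExtendedAttributesFromTarHeader(hdr)
		if err != nil {
			log.Fatal(err)
		}

		fmt.Printf("%s: security descriptor %d bytes, EAs %d bytes\n", hdr.Name, len(sd), len(eas))

		// Symlinks additionally carry reparse-point data derived from Linkname and the mount-point record.
		if hdr.Typeflag == tar.TypeSymlink {
			reparse := backuptar.EncodeReparsePointFromTarHeader(hdr)
			fmt.Printf("  reparse data: %d bytes\n", len(reparse))
		}
	}
}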
@@ -358,21 +421,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win // tar file that was not processed, or io.EOF is there are no more. func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { bw := winio.NewBackupStreamWriter(w) - var sd []byte - var err error - // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written - // by this library will have raw binary for the security descriptor. - if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } - if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) - if err != nil { - return nil, err - } + + sd, err := SecurityDescriptorFromTarHeader(hdr) + if err != nil { + return nil, err } if len(sd) != 0 { bhdr := winio.BackupHeader{ @@ -388,25 +440,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( return nil, err } } - var eas []winio.ExtendedAttribute - for k, v := range hdr.PAXRecords { - if !strings.HasPrefix(k, hdrEaPrefix) { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return nil, err - } - eas = append(eas, winio.ExtendedAttribute{ - Name: k[len(hdrEaPrefix):], - Value: data, - }) + + eadata, err := ExtendedAttributesFromTarHeader(hdr) + if err != nil { + return nil, err } - if len(eas) != 0 { - eadata, err := winio.EncodeExtendedAttributes(eas) - if err != nil { - return nil, err - } + if len(eadata) != 0 { bhdr := winio.BackupHeader{ Id: winio.BackupEaData, Size: int64(len(eadata)), @@ -420,13 +459,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( return nil, err } } + if hdr.Typeflag == tar.TypeSymlink { - _, isMountPoint := hdr.PAXRecords[hdrMountPoint] - rp := winio.ReparsePoint{ - Target: filepath.FromSlash(hdr.Linkname), - IsMountPoint: isMountPoint, - } - reparse := winio.EncodeReparsePoint(&rp) + reparse := EncodeReparsePointFromTarHeader(hdr) bhdr := winio.BackupHeader{ Id: winio.BackupReparseData, Size: int64(len(reparse)), @@ -439,7 +474,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( if err != nil { return nil, err } + } + if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { bhdr := winio.BackupHeader{ Id: winio.BackupData, diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 0385e410812..293ab54c80c 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -143,6 +144,11 @@ func (f *win32File) Close() error { return nil } +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + // prepareIo prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. 
func (f *win32File) prepareIo() (*ioOperation, error) { diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index b632f8f8bb9..b2b644d002a 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error { return conn.sock.Close() } +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) if err != nil { return os.NewSyscallError("shutdown", err) } return nil } -// CloseRead shuts down the read end of the socket. +// CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { @@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error { return nil } -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index f497c0e3917..2d9161e2dee 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -14,8 +14,6 @@ import ( "encoding/binary" "fmt" "strconv" - - "golang.org/x/sys/windows" ) // Variant specifies which GUID variant (or "type") of the GUID. It determines @@ -41,13 +39,6 @@ type Version uint8 var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID - // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. func NewV4() (GUID, error) { var b [16]byte diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 00000000000..f64d828c0ba --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,15 @@ +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. 
+type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 00000000000..83617f4eee9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go index fca241590cc..602920786c9 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go @@ -3,11 +3,10 @@ package security import ( + "fmt" "os" "syscall" "unsafe" - - "github.com/pkg/errors" ) type ( @@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error { // Stat (to determine if `name` is a directory). s, err := os.Stat(name) if err != nil { - return errors.Wrapf(err, "%s os.Stat %s", gvmga, name) + return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err) } // Get a handle to the file/directory. Must defer Close on success. @@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error { sd := uintptr(0) origDACL := uintptr(0) if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { - return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name) + return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) } defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) @@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error { // And finally use SetSecurityInfo to apply the updated DACL. 
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { - return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name) + return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err) } return nil @@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) { } fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name) + return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err) } return fd, nil } @@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp // Generate pointers to the SIDs based on the string SIDs sid, err := syscall.StringToSid(sidVmGroup) if err != nil { - return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup) + return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err) } inheritance := inheritModeNoInheritance @@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp modifiedDACL := uintptr(0) if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil { - return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name) + return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err) } return modifiedDACL, nil diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go index a33a36c0ffb..f7f78fc2304 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package vhd @@ -7,14 +8,13 @@ import ( "syscall" "github.com/Microsoft/go-winio/pkg/guid" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) //go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go //sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk -//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk +//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk //sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk //sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk //sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath @@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct { Version2 OpenVersion2 } +// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and 
`ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating +// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. +type openVersion2 struct { + getInfoOnly int32 + readOnly int32 + resiliencyGUID guid.GUID +} + +type openVirtualDiskParameters struct { + version uint32 + version2 openVersion2 +} + type AttachVersion2 struct { RestrictedOffset uint64 RestrictedLength uint64 } type AttachVirtualDiskParameters struct { - Version uint32 // Must always be set to 2 + Version uint32 Version2 AttachVersion2 } @@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { return err } - if err := syscall.CloseHandle(handle); err != nil { - return err - } - return nil + return syscall.CloseHandle(handle) } // DetachVirtualDisk detaches a virtual hard disk by handle. func DetachVirtualDisk(handle syscall.Handle) (err error) { if err := detachVirtualDisk(handle, 0, 0); err != nil { - return errors.Wrap(err, "failed to detach virtual disk") + return fmt.Errorf("failed to detach virtual disk: %w", err) } return nil } @@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua parameters, nil, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } @@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) { AttachVirtualDiskFlagNone, ¶ms, ); err != nil { - return errors.Wrap(err, "failed to attach virtual disk") + return fmt.Errorf("failed to attach virtual disk: %w", err) } return nil } @@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual var ( handle syscall.Handle defaultType VirtualStorageType + getInfoOnly int32 + readOnly int32 ) if parameters.Version != 2 { return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version) } + if parameters.Version2.GetInfoOnly { + getInfoOnly = 1 + } + if parameters.Version2.ReadOnly { + readOnly = 1 + } + params := &openVirtualDiskParameters{ + version: parameters.Version, + version2: openVersion2{ + getInfoOnly, + readOnly, + parameters.Version2.ResiliencyGUID, + }, + } if err := openVirtualDisk( &defaultType, vhdPath, uint32(virtualDiskAccessMask), uint32(openVirtualDiskFlags), - parameters, + params, &handle, ); err != nil { - return 0, errors.Wrap(err, "failed to open virtual disk") + return 0, fmt.Errorf("failed to open virtual disk: %w", err) } return handle, nil } @@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, nil, &handle, ); err != nil { - return handle, errors.Wrap(err, "failed to create virtual disk") + return handle, fmt.Errorf("failed to create virtual disk: %w", err) } return handle, nil } @@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { &diskPathSizeInBytes, &diskPhysicalPathBuf[0], ); err != nil { - return "", errors.Wrap(err, "failed to get disk physical path") + return "", fmt.Errorf("failed to get disk physical path: %w", err) } return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil } @@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error createParams, ) if err != nil { - return fmt.Errorf("failed to create differencing vhd: %s", err) + return fmt.Errorf("failed to create differencing vhd: %w", err) } if err := 
syscall.CloseHandle(vhdHandle); err != nil { - return fmt.Errorf("failed to close differencing vhd handle: %s", err) + return fmt.Errorf("failed to close differencing vhd handle: %w", err) } return nil } diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index 7fb5f3651b9..1d7498db3be 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint return } -func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) { +func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { var _p0 *uint16 _p0, win32err = syscall.UTF16PtrFromString(path) if win32err != nil { @@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle) } -func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) { +func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) if r0 != 0 { win32err = syscall.Errno(r0) diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore index aec9bd4bb05..54ed6f06c9d 100644 --- a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -1,3 +1,38 @@ +# Binaries for programs and plugins *.exe -.idea -.vscode +*.dll +*.so +*.dylib + +# Ignore vscode setting files +.vscode/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Ignore gcs bin directory +service/bin/ +service/pkg/ + +*.img +*.vhd +*.tar.gz + +# Make stuff +.rootfs-done +bin/* +rootfs/* +*.o +/build/ + +deps/* +out/* + +.idea/ +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml new file mode 100644 index 00000000000..2400e7f1e02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -0,0 +1,99 @@ +run: + timeout: 8m + +linters: + enable: + - stylecheck + +linters-settings: + stylecheck: + # https://staticcheck.io/docs/checks + checks: ["all"] + + +issues: + # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like + # (screaming case Windows api constants for example). 
There's also some structs that we *could* change the initialisms to be Go + # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't + # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. + exclude-rules: + - path: layer.go + linters: + - stylecheck + Text: "ST1003:" + + - path: hcsshim.go + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\wclayer\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: hcn\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcs\\schema1\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hns\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\compactext4\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: ext4\\internal\\format\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guestrequest\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\guest\\prot\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\windevice\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\winapi\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\vmcompute\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\regstate\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: internal\\hcserror\\ + linters: + - stylecheck + Text: "ST1003:" \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile new file mode 100644 index 00000000000..a8f5516cd0b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -0,0 +1,87 @@ +BASE:=base.tar.gz + +GO:=go +GO_FLAGS:=-ldflags "-s -w" # strip Go binaries +CGO_ENABLED:=0 +GOMODVENDOR:= + +CFLAGS:=-O2 -Wall +LDFLAGS:=-static -s # strip C binaries + +GO_FLAGS_EXTRA:= +ifeq "$(GOMODVENDOR)" "1" +GO_FLAGS_EXTRA += -mod=vendor +endif +GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) + +SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) + +# The link aliases for gcstools +GCS_TOOLS=\ + generichook + +.PHONY: all always rootfs test + +all: out/initrd.img out/rootfs.tar.gz + +clean: + find -name '*.o' -print0 | xargs -0 -r rm + rm -rf bin deps rootfs out + +test: + cd $(SRCROOT) && go test -v ./internal/guest/... + +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch + tar -zcf $@ -C rootfs . + rm -rf rootfs + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . 
+ rm -rf rootfs-conv + +out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed + +-include deps/cmd/gcs.gomake +-include deps/cmd/gcstools.gomake + +# Implicit rule for includes that define Go targets. +%.gomake: $(SRCROOT)/Makefile + @mkdir -p $(dir $@) + @/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new + @/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new + @/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new + @/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new + @/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new + @/bin/echo -e '\tmv $$@.new $$@' >> $@.new + @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new + mv $@.new $@ + +VPATH=$(SRCROOT) + +bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +bin/init: init/init.o vsockexec/vsock.o + @mkdir -p bin + $(CC) $(LDFLAGS) -o $@ $^ + +%.o: %.c + @mkdir -p $(dir $@) + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 95c30036562..b8ca926a9da 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -2,13 +2,67 @@ [![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) -This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). +This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers. -It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well. +It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well. + +## Building + +While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). 
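Before the per-binary build steps that follow, a short sketch of the "library" use mentioned above. This block is an editorial illustration rather than part of the upstream README: it assumes the top-level package still exports the HNSListEndpointRequest helper and the HNSEndpoint alias, and it only builds and runs on Windows.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Ask the Host Network Service (HNS) for every endpoint on this host.
	endpoints, err := hcsshim.HNSListEndpointRequest()
	if err != nil {
		log.Fatalf("listing HNS endpoints: %v", err)
	}
	for _, ep := range endpoints {
		// The endpoint schema (see internal/hns/hnsendpoint.go elsewhere in this diff)
		// carries the address and DNS settings for each container network attachment.
		fmt.Printf("%s -> %s (DNS servers: %s)\n", ep.Name, ep.IPAddress, ep.DNSServerList)
	}
}
```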
+### Linux Hyper-V Container Guest Agent + +To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. +```powershell +C:\> $env:GOOS="linux" +C:\> go build .\cmd\gcs\ +``` + +or on a Linux machine +```sh +> go build ./cmd/gcs +``` + +If you want it to be packaged inside of a rootfs to boot with alongside all of the other tools then you'll need to provide a rootfs that it can be packaged inside of. An easy way is to export the rootfs of a container. + +```sh +docker pull busybox +docker run --name base_image_container busybox +docker export base_image_container | gzip > base.tar.gz +BASE=./base.tar.gz +make all +``` + +If the build is successful, in the `./out` folder you should see: +```sh +> ls ./out/ +delta.tar.gz initrd.img rootfs.tar.gz +``` + +### Containerd Shim +For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md. + +Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. + +```powershell +C:\> $env:GOOS="windows" +C:\> go build .\cmd\containerd-shim-runhcs-v1 +``` + +Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running: +```powershell +.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii +``` + +This config file will already have the shim set as the default runtime for cri interactions. + +To trial using the shim out with ctr.exe: +```powershell +C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!" +``` ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. @@ -16,7 +70,27 @@ When you submit a pull request, a CLA-bot will automatically determine whether y a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. -We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project. +We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to +certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for +more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure +that all commits in a given PR are signed-off. + +### Test Directory (Important to note) + +This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this +project is being vendored/used as a library. 
Some of these dependencies were only being used for tests, so the /test directory in this project also has +its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file +has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project +(which is the repo itself on your disk). + +``` +replace ( + github.com/Microsoft/hcsshim => ../ +) +``` + +Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the +CI in this project will check if the files are out of date and will fail if this is true. ## Code of Conduct diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 644f0ab711f..e21354ffd66 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -78,6 +78,13 @@ var ( // ErrNotSupported is an error encountered when hcs doesn't support the request ErrPlatformNotSupported = errors.New("unsupported platform request") + + // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. + ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) + + // ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that + // compute system has already been closed. + ErrInvalidHandle = syscall.Errno(0x6) ) type ErrorEvent struct { @@ -249,6 +256,14 @@ func IsNotExist(err error) bool { err == ErrElementNotFound } +// IsErrorInvalidHandle checks whether the error is the result of an operation carried +// out on a handle that is invalid/closed. This error popped up while trying to query +// stats on a container in the process of being stopped. +func IsErrorInvalidHandle(err error) bool { + err = getInnerError(err) + return err == ErrInvalidHandle +} + // IsAlreadyClosed checks if an error is caused by the Container or Process having been // already closed by a call to the Close() method. 
func IsAlreadyClosed(err error) bool { @@ -281,6 +296,7 @@ func IsTimeout(err error) bool { func IsAlreadyStopped(err error) bool { err = getInnerError(err) return err == ErrVmcomputeAlreadyStopped || + err == ErrProcessAlreadyStopped || err == ErrElementNotFound } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 8f203466887..f4605922ab4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -3,7 +3,9 @@ package hcs import ( "context" "encoding/json" + "errors" "io" + "os" "sync" "syscall" "time" @@ -16,16 +18,17 @@ import ( // ContainerError is an error encountered in HCS type Process struct { - handleLock sync.RWMutex - handle vmcompute.HcsProcess - processID int - system *System - hasCachedStdio bool - stdioLock sync.Mutex - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - callbackNumber uintptr + handleLock sync.RWMutex + handle vmcompute.HcsProcess + processID int + system *System + hasCachedStdio bool + stdioLock sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + callbackNumber uintptr + killSignalDelivered bool closedWaitOnce sync.Once waitBlock chan struct{} @@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) { return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) } + if process.killSignalDelivered { + // A kill signal has already been sent to this process. Sending a second + // one offers no real benefit, as processes cannot stop themselves from + // being terminated, once a TerminateProcess has been issued. Sending a + // second kill may result in a number of errors (two of which detailed bellow) + // and which we can avoid handling. + return true, nil + } + resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) + if err != nil { + // We still need to check these two cases, as processes may still be killed by an + // external actor (human operator, OOM, random script etc). + if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) { + // There are two cases where it should be safe to ignore an error returned + // by HcsTerminateProcess. The first one is cause by the fact that + // HcsTerminateProcess ends up calling TerminateProcess in the context + // of a container. According to the TerminateProcess documentation: + // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks + // After a process has terminated, call to TerminateProcess with open + // handles to the process fails with ERROR_ACCESS_DENIED (5) error code. + // It's safe to ignore this error here. HCS should always have permissions + // to kill processes inside any container. So an ERROR_ACCESS_DENIED + // is unlikely to be anything else than what the ending remarks in the + // documentation states. + // + // The second case is generated by hcs itself, if for any reason HcsTerminateProcess + // is called twice in a very short amount of time. In such cases, hcs may return + // HCS_E_PROCESS_ALREADY_STOPPED. 
+ return true, nil + } + } events := processHcsResult(ctx, resultJSON) delivered, err := process.processSignalResult(ctx, err) if err != nil { err = makeProcessError(process, operation, err, events) } + + process.killSignalDelivered = delivered return delivered, err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go index bcfeb34d549..70884aad75f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/attachment.go @@ -27,4 +27,10 @@ type Attachment struct { CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` ReadOnly bool `json:"ReadOnly,omitempty"` + + SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"` + + AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"` + + ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go index 4fb23107683..39a54432c02 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/container.go @@ -31,4 +31,6 @@ type Container struct { RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` AssignedDevices []Device `json:"AssignedDevices,omitempty"` + + AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go index f1a28cd3898..0be0475d41a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_config.go @@ -14,5 +14,5 @@ type CpuGroupConfig struct { Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` // Hypervisor CPU group IDs exposed to clients - HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"` + HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go index 107caddadae..31c4538affc 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/device.go @@ -12,9 +12,9 @@ package hcsschema type DeviceType string const ( - ClassGUID DeviceType = "ClassGuid" - DeviceInstance DeviceType = "DeviceInstance" - GPUMirror DeviceType = "GpuMirror" + ClassGUID DeviceType = "ClassGuid" + DeviceInstanceID DeviceType = "DeviceInstance" + GPUMirror DeviceType = "GpuMirror" ) type Device struct { @@ -22,6 +22,6 @@ type Device struct { Type DeviceType `json:"Type,omitempty"` // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` - // The location path of the device to assign to the container. Only used when Type is DeviceInstance. + // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID. 
LocationPath string `json:"LocationPath,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go new file mode 100644 index 00000000000..8dbe40b3be2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_container_definition_device.go @@ -0,0 +1,14 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ContainerDefinitionDevice struct { + DeviceExtension []DeviceExtension `json:"device_extension,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go new file mode 100644 index 00000000000..8fe89f92747 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_category.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceCategory struct { + Name string `json:"name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go new file mode 100644 index 00000000000..a62568d892e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_extension.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtension struct { + DeviceCategory *DeviceCategory `json:"device_category,omitempty"` + Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go new file mode 100644 index 00000000000..a7410febd6d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_instance.go @@ -0,0 +1,17 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceInstance struct { + Id string `json:"id,omitempty"` + LocationPath string `json:"location_path,omitempty"` + PortName string `json:"port_name,omitempty"` + InterfaceClass []InterfaceClass `json:"interface_class,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go new file mode 100644 index 00000000000..3553640647e --- /dev/null +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_device_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceNamespace struct { + RequiresDriverstore bool `json:"requires_driverstore,omitempty"` + DeviceCategory []DeviceCategory `json:"device_category,omitempty"` + DeviceInstance []DeviceInstance `json:"device_instance,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go new file mode 100644 index 00000000000..7be98b54107 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_interface_class.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type InterfaceClass struct { + Type_ string `json:"type,omitempty"` + Identifier string `json:"identifier,omitempty"` + Recurse bool `json:"recurse,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go new file mode 100644 index 00000000000..3ab9cf1ecf0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_namespace.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DeviceExtensionNamespace struct { + Ob *ObjectNamespace `json:"ob,omitempty"` + Device *DeviceNamespace `json:"device,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go new file mode 100644 index 00000000000..d2f51b3b53c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_directory.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectDirectory struct { + Name string `json:"name,omitempty"` + Clonesd string `json:"clonesd,omitempty"` + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go new file mode 100644 index 00000000000..47dfb55bfa8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_namespace.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen 
(https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectNamespace struct { + Shadow string `json:"shadow,omitempty"` + Symlink []ObjectSymlink `json:"symlink,omitempty"` + Objdir []ObjectDirectory `json:"objdir,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go new file mode 100644 index 00000000000..8867ebe5f02 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/model_object_symlink.go @@ -0,0 +1,18 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type ObjectSymlink struct { + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + Scope string `json:"scope,omitempty"` + Pathtoclone string `json:"pathtoclone,omitempty"` + AccessMask int32 `json:"access_mask,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go new file mode 100644 index 00000000000..9ef322f615b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_p_mem_mapping.go @@ -0,0 +1,15 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type VirtualPMemMapping struct { + HostPath string `json:"HostPath,omitempty"` + ImageFormat string `json:"ImageFormat,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index 262714b4d0f..7cf954c7b26 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -20,6 +20,7 @@ type HNSEndpoint struct { IPv6Address net.IP `json:",omitempty"` DNSSuffix string `json:",omitempty"` DNSServerList string `json:",omitempty"` + DNSDomain string `json:",omitempty"` GatewayAddress string `json:",omitempty"` GatewayAddressV6 string `json:",omitempty"` EnableInternalDNS bool `json:",omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go index 6765aaead5e..591a2631e45 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -22,9 +22,9 @@ const ( type NatPolicy struct { Type PolicyType `json:"Type"` - Protocol string - InternalPort uint16 - ExternalPort uint16 + Protocol string `json:",omitempty"` + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` } type QosPolicy struct { @@ -88,20 +88,20 @@ const ( type ACLPolicy struct { Type PolicyType `json:"Type"` Id string `json:"Id,omitempty"` - Protocol uint16 - Protocols string `json:"Protocols,omitempty"` - InternalPort uint16 + Protocol uint16 `json:",omitempty"` + Protocols string `json:"Protocols,omitempty"` + InternalPort uint16 `json:",omitempty"` Action ActionType Direction DirectionType - LocalAddresses string - RemoteAddresses string - 
LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 - ServiceName string + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` } type Policy struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go index ff81ac2c156..5debe974d41 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -21,7 +21,7 @@ func ActivateLayer(ctx context.Context, path string) (err error) { err = activateLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go index ffee31ab126..480aee8725c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -21,7 +21,7 @@ func CreateLayer(ctx context.Context, path, parent string) (err error) { err = createLayer(&stdDriverInfo, path, parent) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go index 5a3809ae229..131aa94f14b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -28,7 +28,7 @@ func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []str err = createSandboxLayer(&stdDriverInfo, path, 0, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go index 787054e7941..424467ac331 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -19,7 +19,7 @@ func DestroyLayer(ctx context.Context, path string) (err error) { err = destroyLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index 22f7605beee..035c9041e68 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -25,7 +25,7 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error err = 
expandSandboxSize(&stdDriverInfo, path, size) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } // Manually expand the volume now in order to work around bugs in 19H1 and diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go index 09f0de1a446..97b27eb7d6b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -35,7 +35,7 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go index 4d22d0ecf3d..8d213f5871a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -27,7 +27,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { log.G(ctx).Debug("Calling proc (1)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) if err != nil { - return "", hcserror.New(err, title+" - failed", "(first call)") + return "", hcserror.New(err, title, "(first call)") } // Allocate a mount path of the returned length. @@ -41,7 +41,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { log.G(ctx).Debug("Calling proc (2)") err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) if err != nil { - return "", hcserror.New(err, title+" - failed", "(second call)") + return "", hcserror.New(err, title, "(second call)") } mountPath := syscall.UTF16ToString(mountPathp[0:]) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go index bcc8fbd4239..ae1fff84036 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -21,7 +21,7 @@ func GetSharedBaseImages(ctx context.Context) (_ string, err error) { var buffer *uint16 err = getBaseImages(&buffer) if err != nil { - return "", hcserror.New(err, title+" - failed", "") + return "", hcserror.New(err, title, "") } imageData := interop.ConvertAndFreeCoTaskMemString(buffer) span.AddAttributes(trace.StringAttribute("imageData", imageData)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go index 3eaca27808e..4b282fef9db 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -20,7 +20,7 @@ func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error err = grantVmAccess(vmid, filepath) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go index b3c150d66fe..687550f0be3 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -36,7 +36,7 @@ func ImportLayer(ctx context.Context, path string, importFolderPath string, pare err = importLayer(&stdDriverInfo, path, importFolderPath, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go index c6999973cb8..01e67233939 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -21,7 +21,7 @@ func LayerExists(ctx context.Context, path string) (_ bool, err error) { var exists uint32 err = layerExists(&stdDriverInfo, path, &exists) if err != nil { - return false, hcserror.New(err, title+" - failed", "") + return false, hcserror.New(err, title, "") } span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) return exists != 0, nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index 83ba72cfada..b7f3064f26b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) { defer tf.Close() s := bufio.NewScanner(tf) if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("Invalid tombstones file") + return nil, errors.New("invalid tombstones file") } ts := make(map[string]([]string)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go index bcf39c6b86c..09950297cef 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -17,12 +17,12 @@ func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() - span.AddAttributes(trace.StringAttribute("name", name)) + span.AddAttributes(trace.StringAttribute("objectName", name)) var id guid.GUID err = nameToGuid(name, &id) if err != nil { - return guid.GUID{}, hcserror.New(err, title+" - failed", "") + return guid.GUID{}, hcserror.New(err, title, "") } span.AddAttributes(trace.StringAttribute("guid", id.String())) return id, nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go index 55f7730d0c1..90129faefbb 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -38,7 +38,7 @@ func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) ( defer prepareLayerLock.Unlock() err = prepareLayer(&stdDriverInfo, path, layers) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go index 79fb986786c..71b130c525f 
100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -19,7 +19,7 @@ func UnprepareLayer(ctx context.Context, path string) (err error) { err = unprepareLayer(&stdDriverInfo, path) if err != nil { - return hcserror.New(err, title+" - failed", "") + return hcserror.New(err, title, "") } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go new file mode 100644 index 00000000000..def9525417e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -0,0 +1,44 @@ +package winapi + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 + +// CreatePseudoConsole creates a windows pseudo console. +func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error { + // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand. + return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size)))) +} + +// HRESULT WINAPI CreatePseudoConsole( +// _In_ COORD size, +// _In_ HANDLE hInput, +// _In_ HANDLE hOutput, +// _In_ DWORD dwFlags, +// _Out_ HPCON* phPC +// ); +// +//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole + +// void WINAPI ClosePseudoConsole( +// _In_ HPCON hPC +// ); +// +//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole + +// HRESULT WINAPI ResizePseudoConsole( +// _In_ HPCON hPC , +// _In_ COORD size +// ); +// +//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go index 83f70406446..53f62948c90 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/memory.go @@ -1,27 +1,4 @@ package winapi -// VOID RtlMoveMemory( -// _Out_ VOID UNALIGNED *Destination, -// _In_ const VOID UNALIGNED *Source, -// _In_ SIZE_T Length -// ); -//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory - //sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc //sys LocalFree(ptr uintptr) = kernel32.LocalFree - -// BOOL QueryWorkingSet( -// HANDLE hProcess, -// PVOID pv, -// DWORD cb -// ); -//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet - -type PSAPI_WORKING_SET_INFORMATION struct { - NumberOfEntries uintptr - WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK -} - -type PSAPI_WORKING_SET_BLOCK struct { - Flags uintptr -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go 
index b87068327cc..37839435b93 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -2,9 +2,7 @@ package winapi const PROCESS_ALL_ACCESS uint32 = 2097151 -// DWORD GetProcessImageFileNameW( -// HANDLE hProcess, -// LPWSTR lpImageFileName, -// DWORD nSize -// ); -//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW +const ( + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 + PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go index db59567d089..859b753c246 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -20,36 +20,41 @@ func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) { return } +// UnicodeString corresponds to UNICODE_STRING win32 struct defined here +// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string type UnicodeString struct { Length uint16 MaximumLength uint16 Buffer *uint16 } +// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value +// denotes the maximum number of wide chars a path can have. +const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 + //String converts a UnicodeString to a golang string func (uni UnicodeString) String() string { // UnicodeString is not guaranteed to be null terminated, therefore // use the UnicodeString's Length field - return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) + return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) } // NewUnicodeString allocates a new UnicodeString and copies `s` into // the buffer of the new UnicodeString. func NewUnicodeString(s string) (*UnicodeString, error) { - // Get length of original `s` to use in the UnicodeString since the `buf` - // created later will have an additional trailing null character - length := len(s) - if length > 32767 { - return nil, syscall.ENAMETOOLONG - } - buf, err := windows.UTF16FromString(s) if err != nil { return nil, err } + + if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH { + return nil, syscall.ENAMETOOLONG + } + uni := &UnicodeString{ - Length: uint16(length * 2), - MaximumLength: uint16(length * 2), + // The length is in bytes and should not include the trailing null character. + Length: uint16((len(buf) - 1) * 2), + MaximumLength: uint16((len(buf) - 1) * 2), Buffer: &buf[0], } return uni, nil diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go index ec88c0d2128..1d4ba3c4f8e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -2,4 +2,4 @@ // be thought of as an extension to golang.org/x/sys/windows. 
package winapi -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go +//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index 2941b0f9809..4eb64b4c0c4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -37,13 +37,15 @@ func errnoErr(e syscall.Errno) error { } var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modntdll = windows.NewLazySystemDLL("ntdll.dll") modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modpsapi = windows.NewLazySystemDLL("psapi.dll") modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procSearchPathW = modkernel32.NewProc("SearchPathW") @@ -57,11 +59,8 @@ var ( procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") procLogonUserW = modadvapi32.NewProc("LogonUserW") - procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory") procLocalAlloc = modkernel32.NewProc("LocalAlloc") procLocalFree = modkernel32.NewProc("LocalFree") - procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet") - procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") @@ -74,6 +73,33 @@ var ( procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") ) +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + return +} + +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, 
uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) status = uint32(r0) @@ -219,18 +245,6 @@ func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uin return } -func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func LocalAlloc(flags uint32, size int) (ptr uintptr) { r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) ptr = uintptr(r0) @@ -242,31 +256,6 @@ func LocalFree(ptr uintptr) { return } -func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize)) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) amount = uint32(r0) diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index e9267b9554f..75dce5d821d 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -35,4 +35,16 @@ const ( // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). V20H2 = 19042 + + // V21H1 corresponds to Windows Server 21H1 (semi-annual channel). + V21H1 = 19043 + + // V21H2Win10 corresponds to Windows 10 (November 2021 Update). + V21H2Win10 = 19044 + + // V21H2Server corresponds to Windows Server 2022 (ltsc2022). + V21H2Server = 20348 + + // V21H2Win11 corresponds to Windows 11 (original release). + V21H2Win11 = 22000 ) diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS new file mode 100644 index 00000000000..2b00ddba0df --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS new file mode 100644 index 00000000000..1fbd3e976fa --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. 
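Usage note on the build-number constants the hcsshim bump adds to the osversion package above (V21H1, V21H2Win10, V21H2Server, V21H2Win11): the sketch below shows how a caller might gate behaviour on them. It is illustrative only and not part of the vendored diff; it assumes the package's existing osversion.Build() helper, uses a hypothetical isServer2022OrNewer wrapper, and builds only on Windows.

// Illustrative sketch, not part of the vendored code.
// Assumes osversion.Build() (the package's build-number helper); Windows-only.
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

// isServer2022OrNewer is a hypothetical helper: it reports whether the host
// build is at least Windows Server 2022 (ltsc2022), i.e. build 20348.
func isServer2022OrNewer() bool {
	return osversion.Build() >= osversion.V21H2Server
}

func main() {
	if isServer2022OrNewer() {
		fmt.Println("host is Windows Server 2022 or newer")
	} else {
		fmt.Printf("host build %d predates Windows Server 2022\n", osversion.Build())
	}
}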
diff --git a/vendor/github.com/bits-and-blooms/bitset/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE similarity index 96% rename from vendor/github.com/bits-and-blooms/bitset/LICENSE rename to vendor/github.com/ProtonMail/go-crypto/LICENSE index 59cab8a939b..6a66aea5eaf 100644 --- a/vendor/github.com/bits-and-blooms/bitset/LICENSE +++ b/vendor/github.com/ProtonMail/go-crypto/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014 Will Fitzgerald. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go new file mode 100644 index 00000000000..3ed3f435737 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go @@ -0,0 +1,381 @@ +package bitcurves + +// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2011 ThePiachu. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitelliptic implements several Koblitz elliptic curves over prime +// fields. + +// This package operates, internally, on Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole +// calculation can be performed within the transform (as in ScalarMult and +// ScalarBaseMult). But even for Add and Double, it's faster to apply and +// reverse the transform than to operate in affine coordinates. + +import ( + "crypto/elliptic" + "io" + "math/big" + "sync" +) + +// A BitCurve represents a Koblitz Curve with a=0. 
+// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html +type BitCurve struct { + Name string + P *big.Int // the order of the underlying field + N *big.Int // the order of the base point + B *big.Int // the constant of the BitCurve equation + Gx, Gy *big.Int // (x,y) of the base point + BitSize int // the size of the underlying field +} + +// Params returns the parameters of the given BitCurve (see BitCurve struct) +func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) { + cp = new(elliptic.CurveParams) + cp.Name = bitCurve.Name + cp.P = bitCurve.P + cp.N = bitCurve.N + cp.Gx = bitCurve.Gx + cp.Gy = bitCurve.Gy + cp.BitSize = bitCurve.BitSize + return cp +} + +// IsOnCurve returns true if the given (x,y) lies on the BitCurve. +func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { + // y² = x³ + b + y2 := new(big.Int).Mul(y, y) //y² + y2.Mod(y2, bitCurve.P) //y²%P + + x3 := new(big.Int).Mul(x, x) //x² + x3.Mul(x3, x) //x³ + + x3.Add(x3, bitCurve.B) //x³+B + x3.Mod(x3, bitCurve.P) //(x³+B)%P + + return x3.Cmp(y2) == 0 +} + +// affineFromJacobian reverses the Jacobian transform. See the comment at the +// top of the file. +func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { + if z.Cmp(big.NewInt(0)) == 0 { + panic("bitcurve: Can't convert to affine with Jacobian Z = 0") + } + // x = YZ^2 mod P + zinv := new(big.Int).ModInverse(z, bitCurve.P) + zinvsq := new(big.Int).Mul(zinv, zinv) + + xOut = new(big.Int).Mul(x, zinvsq) + xOut.Mod(xOut, bitCurve.P) + // y = YZ^3 mod P + zinvsq.Mul(zinvsq, zinv) + yOut = new(big.Int).Mul(y, zinvsq) + yOut.Mod(yOut, bitCurve.P) + return xOut, yOut +} + +// Add returns the sum of (x1,y1) and (x2,y2) +func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + z := new(big.Int).SetInt64(1) + x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z) + return bitCurve.affineFromJacobian(x, y, z) +} + +// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and +// (x2, y2, z2) and returns their sum, also in Jacobian form. 
+func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + z1z1 := new(big.Int).Mul(z1, z1) + z1z1.Mod(z1z1, bitCurve.P) + z2z2 := new(big.Int).Mul(z2, z2) + z2z2.Mod(z2z2, bitCurve.P) + + u1 := new(big.Int).Mul(x1, z2z2) + u1.Mod(u1, bitCurve.P) + u2 := new(big.Int).Mul(x2, z1z1) + u2.Mod(u2, bitCurve.P) + h := new(big.Int).Sub(u2, u1) + if h.Sign() == -1 { + h.Add(h, bitCurve.P) + } + i := new(big.Int).Lsh(h, 1) + i.Mul(i, i) + j := new(big.Int).Mul(h, i) + + s1 := new(big.Int).Mul(y1, z2) + s1.Mul(s1, z2z2) + s1.Mod(s1, bitCurve.P) + s2 := new(big.Int).Mul(y2, z1) + s2.Mul(s2, z1z1) + s2.Mod(s2, bitCurve.P) + r := new(big.Int).Sub(s2, s1) + if r.Sign() == -1 { + r.Add(r, bitCurve.P) + } + r.Lsh(r, 1) + v := new(big.Int).Mul(u1, i) + + x3 := new(big.Int).Set(r) + x3.Mul(x3, x3) + x3.Sub(x3, j) + x3.Sub(x3, v) + x3.Sub(x3, v) + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Set(r) + v.Sub(v, x3) + y3.Mul(y3, v) + s1.Mul(s1, j) + s1.Lsh(s1, 1) + y3.Sub(y3, s1) + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Add(z1, z2) + z3.Mul(z3, z3) + z3.Sub(z3, z1z1) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Sub(z3, z2z2) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Mul(z3, h) + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +// Double returns 2*(x,y) +func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { + z1 := new(big.Int).SetInt64(1) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) +} + +// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and +// returns its double, also in Jacobian form. +func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + + a := new(big.Int).Mul(x, x) //X1² + b := new(big.Int).Mul(y, y) //Y1² + c := new(big.Int).Mul(b, b) //B² + + d := new(big.Int).Add(x, b) //X1+B + d.Mul(d, d) //(X1+B)² + d.Sub(d, a) //(X1+B)²-A + d.Sub(d, c) //(X1+B)²-A-C + d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) + + e := new(big.Int).Mul(big.NewInt(3), a) //3*A + f := new(big.Int).Mul(e, e) //E² + + x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D + x3.Sub(f, x3) //F-2*D + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Sub(d, x3) //D-X3 + y3.Mul(e, y3) //E*(D-X3) + y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Mul(y, z) //Y1*Z1 + z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +//TODO: double check if it is okay +// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { + // We have a slight problem in that the identity of the group (the + // point at infinity) cannot be represented in (x, y) form on a finite + // machine. Thus the standard add/double algorithm has to be tweaked + // slightly: our initial state is not the identity, but x, and we + // ignore the first true bit in |k|. If we don't find any true bits in + // |k|, then we return nil, nil, because we cannot return the identity + // element. 
+ + Bz := new(big.Int).SetInt64(1) + x := Bx + y := By + z := Bz + + seenFirstTrue := false + for _, byte := range k { + for bitNum := 0; bitNum < 8; bitNum++ { + if seenFirstTrue { + x, y, z = bitCurve.doubleJacobian(x, y, z) + } + if byte&0x80 == 0x80 { + if !seenFirstTrue { + seenFirstTrue = true + } else { + x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) + } + } + byte <<= 1 + } + } + + if !seenFirstTrue { + return nil, nil + } + + return bitCurve.affineFromJacobian(x, y, z) +} + +// ScalarBaseMult returns k*G, where G is the base point of the group and k is +// an integer in big-endian form. +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) +} + +var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} + +//TODO: double check if it is okay +// GenerateKey returns a public/private key pair. The private key is generated +// using the given reader, which must return random data. +func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { + byteLen := (bitCurve.BitSize + 7) >> 3 + priv = make([]byte, byteLen) + + for x == nil { + _, err = io.ReadFull(rand, priv) + if err != nil { + return + } + // We have to mask off any excess bits in the case that the size of the + // underlying field is not a whole number of bytes. + priv[0] &= mask[bitCurve.BitSize%8] + // This is because, in tests, rand will return all zeros and we don't + // want to get the point at infinity and loop forever. + priv[1] ^= 0x42 + x, y = bitCurve.ScalarBaseMult(priv) + } + return +} + +// Marshal converts a point into the form specified in section 4.3.6 of ANSI +// X9.62. +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 + + ret := make([]byte, 1+2*byteLen) + ret[0] = 4 // uncompressed point + + xBytes := x.Bytes() + copy(ret[1+byteLen-len(xBytes):], xBytes) + yBytes := y.Bytes() + copy(ret[1+2*byteLen-len(yBytes):], yBytes) + return ret +} + +// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On +// error, x = nil. 
+func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 + if len(data) != 1+2*byteLen { + return + } + if data[0] != 4 { // uncompressed form + return + } + x = new(big.Int).SetBytes(data[1 : 1+byteLen]) + y = new(big.Int).SetBytes(data[1+byteLen:]) + return +} + +//curve parameters taken from: +//http://www.secg.org/collateral/sec2_final.pdf + +var initonce sync.Once +var secp160k1 *BitCurve +var secp192k1 *BitCurve +var secp224k1 *BitCurve +var secp256k1 *BitCurve + +func initAll() { + initS160() + initS192() + initS224() + initS256() +} + +func initS160() { + // See SEC 2 section 2.4.1 + secp160k1 = new(BitCurve) + secp160k1.Name = "secp160k1" + secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) + secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) + secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) + secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) + secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) + secp160k1.BitSize = 160 +} + +func initS192() { + // See SEC 2 section 2.5.1 + secp192k1 = new(BitCurve) + secp192k1.Name = "secp192k1" + secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) + secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) + secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) + secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) + secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) + secp192k1.BitSize = 192 +} + +func initS224() { + // See SEC 2 section 2.6.1 + secp224k1 = new(BitCurve) + secp224k1.Name = "secp224k1" + secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) + secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) + secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) + secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) + secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) + secp224k1.BitSize = 224 +} + +func initS256() { + // See SEC 2 section 2.7.1 + secp256k1 = new(BitCurve) + secp256k1.Name = "secp256k1" + secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) + secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) + secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) + secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) + secp256k1.BitSize = 256 +} + +// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) +func S160() *BitCurve { + initonce.Do(initAll) + return secp160k1 +} + +// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) +func S192() *BitCurve { + initonce.Do(initAll) + return secp192k1 +} + +// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1) +func S224() *BitCurve { + initonce.Do(initAll) + return secp224k1 +} + +// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1) +func S256() *BitCurve { + initonce.Do(initAll) + return secp256k1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go new file mode 100644 index 00000000000..77fb8b9a046 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go @@ -0,0 +1,134 @@ +// Package brainpool implements Brainpool elliptic curves. +// Implementation of rcurves is from github.com/ebfe/brainpool +// Note that these curves are implemented with naive, non-constant time operations +// and are likely not suitable for enviroments where timing attacks are a concern. +package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ = 
new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go new file mode 100644 index 00000000000..2d5355085f2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} 
+ +var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go new file mode 100644 index 00000000000..6b6bc7aed04 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package eax provides an implementation of the EAX +// (encrypt-authenticate-translate) mode of operation, as described in +// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS +// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." +// In FSE'04, volume 3017 of LNCS, 2004 +package eax + +import ( + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +const ( + defaultTagSize = 16 + defaultNonceSize = 16 +) + +type eax struct { + block cipher.Block // Only AES-{128, 192, 256} supported + tagSize int // At least 12 bytes recommended + nonceSize int +} + +func (e *eax) NonceSize() int { + return e.nonceSize +} + +func (e *eax) Overhead() int { + return e.tagSize +} + +// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and +// tag lengths. Supports {128, 192, 256}- bit key length. +func NewEAX(block cipher.Block) (cipher.AEAD, error) { + return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and +// given nonce and tag lengths in bytes. Panics on zero nonceSize and +// exceedingly long tags. +// +// It is recommended to use at least 12 bytes as tag length (see, for instance, +// NIST SP 800-38D). +// +// Only to be used for compatibility with existing cryptosystems with +// non-standard parameters. 
For all other cases, prefer NewEAX. +func NewEAXWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if nonceSize < 1 { + return nil, eaxError("Cannot initialize EAX with nonceSize = 0") + } + if tagSize > block.BlockSize() { + return nil, eaxError("Custom tag length exceeds blocksize") + } + return &eax{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + }, nil +} + +func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext) + e.tagSize) + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + + // Encrypt message using CTR mode and omacNonce as IV + ctr := cipher.NewCTR(e.block, omacNonce) + ciphertextData := out[:len(plaintext)] + ctr.XORKeyStream(ciphertextData, plaintext) + + omacCiphertext := e.omacT(2, ciphertextData) + + tag := out[len(plaintext):] + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + return ret +} + +func (e* eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + if len(ciphertext) < e.tagSize { + return nil, eaxError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - e.tagSize + + // Compute tag + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + omacCiphertext := e.omacT(2, ciphertext[:sep]) + + tag := make([]byte, e.tagSize) + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + + // Compare tags + if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { + return nil, eaxError("Tag authentication failed") + } + + // Decrypt ciphertext + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ctr := cipher.NewCTR(e.block, omacNonce) + ctr.XORKeyStream(out, ciphertext[:sep]) + + return ret[:sep], nil +} + +// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) +func (e *eax) omacT(t byte, plaintext []byte) []byte { + blockSize := e.block.BlockSize() + byteT := make([]byte, blockSize) + byteT[blockSize-1] = t + concat := append(byteT, plaintext...) + return e.omac(concat) +} + +func (e *eax) omac(plaintext []byte) []byte { + blockSize := e.block.BlockSize() + // L ← E_K(0^n); B ← 2L; P ← 4L + L := make([]byte, blockSize) + e.block.Encrypt(L, L) + B := byteutil.GfnDouble(L) + P := byteutil.GfnDouble(B) + + // CBC with IV = 0 + cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) + padded := e.pad(plaintext, B, P) + cbcCiphertext := make([]byte, len(padded)) + cbc.CryptBlocks(cbcCiphertext, padded) + + return cbcCiphertext[len(cbcCiphertext)-blockSize:] +} + +func (e *eax) pad(plaintext, B, P []byte) []byte { + // if |M| in {n, 2n, 3n, ...} + blockSize := e.block.BlockSize() + if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { + return byteutil.RightXor(plaintext, B) + } + + // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P + ending := make([]byte, blockSize-len(plaintext)%blockSize) + ending[0] = 0x80 + padded := append(plaintext, ending...) 
+ return byteutil.RightXor(padded, P) +} + +func eaxError(err string) error { + return errors.New("crypto/eax: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go new file mode 100644 index 00000000000..ddb53d07905 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go @@ -0,0 +1,58 @@ +package eax + +// Test vectors from +// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf +var testVectors = []struct { + msg, key, nonce, header, ciphertext string +}{ + {"", + "233952DEE4D5ED5F9B9C6D6FF80FF478", + "62EC67F9C3A4A407FCB2A8C49031A8B3", + "6BFB914FD07EAE6B", + "E037830E8389F27B025A2D6527E79D01"}, + {"F7FB", + "91945D3F4DCBEE0BF45EF52255F095A4", + "BECAF043B0A23D843194BA972C66DEBD", + "FA3BFD4806EB53FA", + "19DD5C4C9331049D0BDAB0277408F67967E5"}, + {"1A47CB4933", + "01F74AD64077F2E704C0F60ADA3DD523", + "70C3DB4F0D26368400A10ED05D2BFF5E", + "234A3463C1264AC6", + "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, + {"481C9E39B1", + "D07CF6CBB7F313BDDE66B727AFD3C5E8", + "8408DFFF3C1A2B1292DC199E46B7D617", + "33CCE2EABFF5A79D", + "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, + {"40D0C07DA5E4", + "35B6D0580005BBC12B0587124557D2C2", + "FDB6B06676EEDC5C61D74276E1F8E816", + "AEB96EAEBE2970E9", + "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, + {"4DE3B35C3FC039245BD1FB7D", + "BD8E6E11475E60B268784C38C62FEB22", + "6EAC5C93072D8E8513F750935E46DA1B", + "D4482D1CA78DCE0F", + "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, + {"8B0A79306C9CE7ED99DAE4F87F8DD61636", + "7C77D6E813BED5AC98BAA417477A2E7D", + "1A8C98DCD73D38393B2BF1569DEEFC19", + "65D2017990D62528", + "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, + {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", + "5FFF20CAFAB119CA2FC73549E20F5B0D", + "DDE59B97D722156D4D9AFF2BC7559826", + "54B9F04E6A09189A", + "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, + {"6CF36720872B8513F6EAB1A8A44438D5EF11", + "A4A4782BCFFD3EC5E7EF6D8C34A56123", + "B781FCF2F75FA5A8DE97A9CA48E522EC", + "899A175897561D7E", + "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, + {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", + "8395FCF1E95BEBD697BD010BC766AAC3", + "22E7ADD93CFC6393C57EC0B3C17D6B44", + "126735FCC320D25A", + "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go new file mode 100644 index 00000000000..4eb19f28d9c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go @@ -0,0 +1,131 @@ +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
+ +package eax + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + {"DFDE093F36B0356E5A81F609786982E3", + "1D8AC604419001816905BA72B14CED7E", + "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", + "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", + "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, + {"F10619EF02E5D94D7550EB84ED364A21", + "8DC0D4F2F745BBAE835CC5574B942D20", + "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", + "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", + "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, + {"429F514EFC64D98A698A9247274CFF45", + "976AA5EB072F912D126ACEBC954FEC38", + "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", + "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", + "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, + {"398138F309085F47F8457CDF53895A63", + "F8A8A7F2D28E5FFF7BBC2F24353F7A36", + "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", + "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", + "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, + {"7A4151EBD3901B42CBA45DAFB2E931BA", + "0FC88ACEE74DD538040321C330974EB8", + "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", + "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", + "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, + {"BFB147E1CD5459424F8C0271FC0E0DC5", + "EABCC126442BF373969EA3015988CC45", + "4C0880E1D71AA2C7", + "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", + "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, + {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", + "0F84B5D36CF4BC3B863313AF3B4D2E97", + "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", + "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", + "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, + {"44E6F2DC8FDC778AD007137D11410F50", + "270A237AD977F7187AA6C158A0BAB24F", + "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", + 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", + "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, + {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", + "8B4BBE26CCD9859DCD84884159D6B0A4", + "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", + "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", + "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, + {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", + "92745C018AA708ECFEB1667E9F3F1B01", + "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", + "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", + "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, + {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", + "0F45CF7F0BF31CCEB85D9DA10F4D749F", + "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", + "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", + "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, + {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", + "5B11EA1D6008EBB41CF892FCA5B943D1", + "BAF4FF5C8242", + "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", + "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, + {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", + "FD82B5B31804FF47D44199B533D0CF84", + "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", + "B804", + "164BB965C05EBE0931A1A63293EDF9C38C27"}, + {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", + "343BE00DA9483F05C14F2E9EB8EA6AE8", + "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", + "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", + 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, + {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", + "E280E13D7367042E3AA09A80111B6184", + "21486C9D7A9647", + "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", + "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, + {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", + "656B41CB3F9CF8C08BAD7EBFC80BD225", + "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", + "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", + "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, + {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", + "DA0F3A40983D92F2D4C01FED33C7A192", + "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", + "F37326A80E08", + "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, + {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", + "3EE1182AEBB19A02B128F28E1D5F7F99", + "D9F35ABB16D776CE", + "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", + "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, + {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", + "336F5D681E0410FAE7B607246092C6DC", + "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", + "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", + "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, + {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", + "AB91F7245DDCE3F1C747872D47BE0A8A", + "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", + "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", + "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, + {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", + "C691294EF67CD04D1B9242AF83DD1421", + "879334DAE3", + "1E17F46A98FEF5CBB40759D95354", + "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, + {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", + "4228DA0678CA3534588859E77DFF014C", + "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", + 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", + "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, + {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", + "943188480C99437495958B0AE4831AA9", + "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", + "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", + "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, + {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", + "B0490F5BD68A52459556B3749ACDF40E", + "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", + "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", + "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go new file mode 100644 index 00000000000..a6bdf51232e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019 ProtonTech AG +// This file contains necessary tools for the aex and ocb packages. +// +// These functions SHOULD NOT be used elsewhere, since they are optimized for +// specific input nature in the EAX and OCB modes of operation. + +package byteutil + +// GfnDouble computes 2 * input in the field of 2^n elements. +// The irreducible polynomial in the finite field for n=128 is +// x^128 + x^7 + x^2 + x + 1 (equals 0x87) +// Constant-time execution in order to avoid side-channel attacks +func GfnDouble(input []byte) []byte { + if len(input) != 16 { + panic("Doubling in GFn only implemented for n = 128") + } + // If the first bit is zero, return 2L = L << 1 + // Else return (L << 1) xor 0^120 10000111 + shifted := ShiftBytesLeft(input) + shifted[15] ^= ((input[0] >> 7) * 0x87) + return shifted +} + +// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. +func ShiftBytesLeft(x []byte) []byte { + l := len(x) + dst := make([]byte, l) + for i := 0; i < l-1; i++ { + dst[i] = (x[i] << 1) | (x[i+1] >> 7) + } + dst[l-1] = x[l-1] << 1 + return dst +} + +// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. +func ShiftNBytesLeft(dst, x []byte, n int) { + // Erase first n / 8 bytes + copy(dst, x[n/8:]) + + // Shift the remaining n % 8 bits + bits := uint(n % 8) + l := len(dst) + for i := 0; i < l-1; i++ { + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits)) + } + dst[l-1] = dst[l-1] << bits + + // Append trailing zeroes + dst = append(dst, make([]byte, n/8)...) 
+} + +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { + X[i] ^= Y[i] + } +} + + +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { + Z[i] = X[i] ^ Y[i] + } +} + +// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) +func RightXor(X, Y []byte) []byte { + offset := len(X) - len(Y) + xored := make([]byte, len(X)); + copy(xored, X) + for i := 0; i < len(Y); i++ { + xored[offset + i] ^= Y[i] + } + return xored +} + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go new file mode 100644 index 00000000000..7f78cfa759b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -0,0 +1,317 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package ocb provides an implementation of the OCB (offset codebook) mode of +// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, +// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT +// AUTHENTICATED ENCRYPTION (2003). +// Security considerations (from RFC-7253): A private key MUST NOT be used to +// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a +// brute-force forging adversary succeeds after 2^{tag length} attempts). A +// single key SHOULD NOT be used to decrypt ciphertext with different tag +// lengths. Nonces need not be secret, but MUST NOT be reused. +// This package only supports underlying block ciphers with 128-bit blocks, +// such as AES-{128, 192, 256}, but may be extended to other sizes. +package ocb + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" + "math/bits" +) + +type ocb struct { + block cipher.Block + tagSize int + nonceSize int + mask mask + // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' + // internal variable can be reused for en/decrypting with nonces sharing + // all but the last 6 bits with N. The prefix of the first nonce used to + // compute the new Ktop, and the Ktop value itself, are stored in + // reusableKtop. If using incremental nonces, this saves one block cipher + // call every 63 out of 64 OCB encryptions, and stores one nonce and one + // output of the block cipher in memory only. + reusableKtop reusableKtop +} + +type mask struct { + // L_*, L_$, (L_i)_{i ∈ N} + lAst []byte + lDol []byte + L [][]byte +} + +type reusableKtop struct { + noncePrefix []byte + Ktop []byte +} + +const ( + defaultTagSize = 16 + defaultNonceSize = 15 +) + +const ( + enc = iota + dec +) + +func (o *ocb) NonceSize() int { + return o.nonceSize +} + +func (o *ocb) Overhead() int { + return o.tagSize +} + +// NewOCB returns an OCB instance with the given block cipher and default +// tag and nonce sizes. 
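The helpers above are the shared GF(2^128) plumbing for EAX and OCB; GfnDouble reduces by the 0x87 polynomial named in its comment. Because byteutil is an internal package and not importable from outside the module, the following is only a local restatement of that doubling step, included to make the reduction visible:

package main

import "fmt"

// Local restatement of the vendored ShiftBytesLeft/GfnDouble helpers; the real
// ones live in the internal byteutil package above.
func shiftBytesLeft(x []byte) []byte {
	l := len(x)
	dst := make([]byte, l)
	for i := 0; i < l-1; i++ {
		dst[i] = (x[i] << 1) | (x[i+1] >> 7)
	}
	dst[l-1] = x[l-1] << 1
	return dst
}

func gfnDouble(input []byte) []byte {
	// Reduce by x^128 + x^7 + x^2 + x + 1 (0x87) only when the top bit overflows.
	shifted := shiftBytesLeft(input)
	shifted[15] ^= (input[0] >> 7) * 0x87
	return shifted
}

func main() {
	// Top bit clear: doubling is a plain left shift.
	a := make([]byte, 16)
	a[15] = 0x01
	fmt.Printf("% X\n", gfnDouble(a)) // ... 00 02

	// Top bit set: the overflow folds back in as 0x87 in the last byte.
	b := make([]byte, 16)
	b[0] = 0x80
	fmt.Printf("% X\n", gfnDouble(b)) // 00 ... 00 87
}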
+func NewOCB(block cipher.Block) (cipher.AEAD, error) { + return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewOCBWithNonceAndTagSize returns an OCB instance with the given block +// cipher, nonce length, and tag length. Panics on zero nonceSize and +// exceedingly long tag size. +// +// It is recommended to use at least 12 bytes as tag length. +func NewOCBWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if block.BlockSize() != 16 { + return nil, ocbError("Block cipher must have 128-bit blocks") + } + if nonceSize < 1 { + return nil, ocbError("Incorrect nonce length") + } + if nonceSize >= block.BlockSize() { + return nil, ocbError("Nonce length exceeds blocksize - 1") + } + if tagSize > block.BlockSize() { + return nil, ocbError("Custom tag length exceeds blocksize") + } + return &ocb{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + mask: initializeMaskTable(block), + reusableKtop: reusableKtop{ + noncePrefix: nil, + Ktop: nil, + }, + }, nil +} + +func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > o.nonceSize { + panic("crypto/ocb: Incorrect nonce length given to OCB") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) + o.crypt(enc, out, nonce, adata, plaintext) + return ret +} + +func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > o.nonceSize { + panic("Nonce too long for this instance") + } + if len(ciphertext) < o.tagSize { + return nil, ocbError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - o.tagSize + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ciphertextData := ciphertext[:sep] + tag := ciphertext[sep:] + o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { + ret = ret[:sep] + return ret, nil + } + for i := range out { + out[i] = 0 + } + return nil, ocbError("Tag authentication failed") +} + +// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) +// function. It returns the resulting plain/ciphertext with the tag appended. +func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { + // + // Consider X as a sequence of 128-bit blocks + // + // Note: For encryption (resp. decryption), X is the plaintext (resp., the + // ciphertext without the tag). + blockSize := o.block.BlockSize() + + // + // Nonce-dependent and per-encryption variables + // + // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop + // is already computed. + truncatedNonce := make([]byte, len(nonce)) + copy(truncatedNonce, nonce) + truncatedNonce[len(truncatedNonce)-1] &= 192 + Ktop := make([]byte, blockSize) + if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { + Ktop = o.reusableKtop.Ktop + } else { + // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N + paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) + paddedNonce = append(paddedNonce, truncatedNonce...) + paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) + // Last 6 bits of paddedNonce are already zero. 
Encrypt into Ktop + paddedNonce[blockSize-1] &= 192 + Ktop = paddedNonce + o.block.Encrypt(Ktop, Ktop) + o.reusableKtop.noncePrefix = truncatedNonce + o.reusableKtop.Ktop = Ktop + } + + // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) + xorHalves := make([]byte, blockSize/2) + byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) + stretch := append(Ktop, xorHalves...) + bottom := int(nonce[len(nonce)-1] & 63) + offset := make([]byte, len(stretch)) + byteutil.ShiftNBytesLeft(offset, stretch, bottom) + offset = offset[:blockSize] + + // + // Process any whole blocks + // + // Note: For encryption Y is ciphertext || tag, for decryption Y is + // plaintext || tag. + checksum := make([]byte, blockSize) + m := len(X) / blockSize + for i := 0; i < m; i++ { + index := bits.TrailingZeros(uint(i + 1)) + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) + blockX := X[i*blockSize : (i+1)*blockSize] + blockY := Y[i*blockSize : (i+1)*blockSize] + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: + o.block.Decrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockY) + } + } + // + // Process any final partial block and compute raw tag + // + tag := make([]byte, blockSize) + if len(X)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + pad := make([]byte, blockSize) + o.block.Encrypt(pad, offset) + chunkX := X[blockSize*m:] + chunkY := Y[blockSize*m : len(X)] + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +} + +// This hash function is used to compute the tag. Per design, on empty input it +// returns a slice of zeros, of the same length as the underlying block cipher +// block size. 
+func (o *ocb) hash(adata []byte) []byte { + // + // Consider A as a sequence of 128-bit blocks + // + A := make([]byte, len(adata)) + copy(A, adata) + blockSize := o.block.BlockSize() + + // + // Process any whole blocks + // + sum := make([]byte, blockSize) + offset := make([]byte, blockSize) + m := len(A) / blockSize + for i := 0; i < m; i++ { + chunk := A[blockSize*i : blockSize*(i+1)] + index := bits.TrailingZeros(uint(i + 1)) + // If the mask table is too short + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[index]) + byteutil.XorBytesMut(chunk, offset) + o.block.Encrypt(chunk, chunk) + byteutil.XorBytesMut(sum, chunk) + } + + // + // Process any final partial block; compute final hash value + // + if len(A)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + // Pad block with 1 || 0 ^ 127 - bitlength(a) + ending := make([]byte, blockSize-len(A)%blockSize) + ending[0] = 0x80 + encrypted := append(A[blockSize*m:], ending...) + byteutil.XorBytesMut(encrypted, offset) + o.block.Encrypt(encrypted, encrypted) + byteutil.XorBytesMut(sum, encrypted) + } + return sum +} + +func initializeMaskTable(block cipher.Block) mask { + // + // Key-dependent variables + // + lAst := make([]byte, block.BlockSize()) + block.Encrypt(lAst, lAst) + lDol := byteutil.GfnDouble(lAst) + L := make([][]byte, 1) + L[0] = byteutil.GfnDouble(lDol) + + return mask{ + lAst: lAst, + lDol: lDol, + L: L, + } +} + +// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) +func (m *mask) extendTable(limit int) { + for i := len(m.L); i <= limit; i++ { + m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) + } +} + +func ocbError(err string) error { + return errors.New("crypto/ocb: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go new file mode 100644 index 00000000000..0efaf344fd5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go @@ -0,0 +1,136 @@ +// In the test vectors provided by RFC 7253, the "bottom" +// internal variable, which defines "offset" for the first time, does not +// exceed 15. However, it can attain values up to 63. + +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
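ocb.go above exposes the standard cipher.AEAD interface via NewOCB and NewOCBWithNonceAndTagSize. A minimal usage sketch with AES-128 and the default 15-byte nonce and 16-byte tag; the key, nonce, plaintext, and associated data below are placeholders:

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/ocb"
)

func main() {
	key := make([]byte, 16) // AES-128; 192- and 256-bit keys work the same way
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, _ := aes.NewCipher(key)

	aead, err := ocb.NewOCB(block) // default 15-byte nonce, 16-byte tag
	if err != nil {
		panic(err)
	}

	nonce := make([]byte, aead.NonceSize()) // nonces must never repeat under one key
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	adata := []byte("authenticated but not encrypted")
	ct := aead.Seal(nil, nonce, []byte("hello ocb"), adata)

	pt, err := aead.Open(nil, nonce, ct, adata)
	fmt.Println(string(pt), err) // "hello ocb" <nil>
}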
+ +package ocb + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + + {"9438C5D599308EAF13F800D2D31EA7F0", + "C38EE4801BEBFFA1CD8635BE", + "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", + "962D227786FB8913A8BAD5DC3250", + "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, + {"BA7DE631C7D6712167C6724F5B9A2B1D", + "35263EBDA05765DC0E71F1F5", + "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", + "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", + "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, + {"2E74B25289F6FD3E578C24866E9C72A5", + "FD912F15025AF8414642BA1D1D", + "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", + "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", + "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, + {"E7EC507C802528F790AFF5303A017B17", + "4B97A7A568940A9E3CE7A99E93031E", + "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", + "9455B3EA706B74", + "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, + {"6C928AA3224736F28EE7378DE0090191", + "8936138E2E4C6A13280017A1622D", + "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", + "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", + "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, + {"ECEA315CA4B3F425B0C9957A17805EA4", + "664CDAE18403F4F9BA13015A44FC", + "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", + "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", + "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, + {"4EE616C4A58AAA380878F71A373461F6", + "91B8C9C176D9C385E9C47E52", + "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", + "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", + "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, + {"DCD475773136C830D5E3D0C5FE05B7FF", 
+ "BB8E1FBB483BE7616A922C4A", + "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", + "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", + "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, + {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", + "FACF804A4BEBF998505FF9DE", + "8213B9263B2971A5BDA18DBD02208EE1", + "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", + "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, + {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", + "0B0EF53D4606B28D1398355F", + "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", + "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", + "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, + {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", + "5C83F4896D0738E1366B1836", + "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", + "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", + "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, + {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", + "36B88F63ADBB5668588181D774", + "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", + "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", + "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, + {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", + "BC099AEB637361BAC536B57618", + "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", + "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", + "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, + {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", + "B1B0AAF373B8B026EB80422051D8", + "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", + 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", + "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, + {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", + "2F2F17CECF7E5A756D10785A3CB9DB", + "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", + "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", + "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, + {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", + "BCA625674F41D1E3AB47672DC0C3", + "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", + "E525422519ECE070E82C", + "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, + {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", + "36D2EC25ADD33CDEDF495205BBC923", + "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", + "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", + "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, + {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", + "81691D5D20EC818FCFF24B33DECC", + "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", + "EB861165DAF7625F827C6B574ED703F03215", + "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, + {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", + "D2A7B94DD12CDACA909D3AD7", + "E021A78F374FC271389AB9A3E97077D755", + "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", + "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, + {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", + "65ED3D63F79F90BBFD19775E", + "336A8C0B7243582A46B221AA677647FCAE91", + "134A8B34824A290E7B", + "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, + {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", + "C391A856B9FE234E14BA1AC7BB40FF", + "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", + "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", + 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, + {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", + "B15B61C8BB39261A8F55AB178EC3", + "D0729B6B75BB", + "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", + "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, + {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", + "01D7BDB1133E9C347486C1EFA6", + "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", + "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", + "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, + {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", + "0C52B3D11D636E5910A4DD76D32C", + "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", + "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", + "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go new file mode 100644 index 00000000000..843085589c8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -0,0 +1,78 @@ +package ocb + +import ( + "encoding/hex" +) + +// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is +// shared accross tests. 
+var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") + +var rfc7253testVectors = []struct { + nonce, header, plaintext, ciphertext string +}{ + {"BBAA99887766554433221100", + "", + "", + "785407BFFFC8AD9EDCC5520AC9111EE6"}, + {"BBAA99887766554433221101", + "0001020304050607", + "0001020304050607", + "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, + {"BBAA99887766554433221102", + "0001020304050607", + "", + "81017F8203F081277152FADE694A0A00"}, + {"BBAA99887766554433221103", + "", + "0001020304050607", + "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, + {"BBAA99887766554433221104", + "000102030405060708090A0B0C0D0E0F", + "000102030405060708090A0B0C0D0E0F", + "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, + {"BBAA99887766554433221105", + "000102030405060708090A0B0C0D0E0F", + "", + "8CF761B6902EF764462AD86498CA6B97"}, + {"BBAA99887766554433221106", + "", + "000102030405060708090A0B0C0D0E0F", + "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, + {"BBAA99887766554433221107", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, + {"BBAA99887766554433221108", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "", + "6DC225A071FC1B9F7C69F93B0F1E10DE"}, + {"BBAA99887766554433221109", + "", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, + {"BBAA9988776655443322110A", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, + {"BBAA9988776655443322110B", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "", + "FE80690BEE8A485D11F32965BC9D2A32"}, + {"BBAA9988776655443322110C", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, + {"BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, + {"BBAA9988776655443322110E", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "", + "C5CD9D1850C141E358649994EE701B68"}, + {"BBAA9988776655443322110F", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go new file mode 100644 index 00000000000..5dc158f0128 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -0,0 +1,24 @@ +package ocb + +// Second set of test vectors from https://tools.ietf.org/html/rfc7253 +var rfc7253TestVectorTaglen96 = struct { + key, nonce, header, plaintext, ciphertext string +}{"0F0E0D0C0B0A09080706050403020100", + "BBAA9988776655443322110D", + 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + +var rfc7253AlgorithmTest = []struct { + KEYLEN, TAGLEN int + OUTPUT string }{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, + } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go new file mode 100644 index 00000000000..3c6251d1ce6 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go @@ -0,0 +1,153 @@ +// Copyright 2014 Matthew Endsley +// All rights reserved +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted providing that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// Package keywrap is an implementation of the RFC 3394 AES key wrapping +// algorithm. This is used in OpenPGP with elliptic curve keys. +package keywrap + +import ( + "crypto/aes" + "encoding/binary" + "errors" +) + +var ( + // ErrWrapPlaintext is returned if the plaintext is not a multiple + // of 64 bits. + ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") + + // ErrUnwrapCiphertext is returned if the ciphertext is not a + // multiple of 64 bits. + ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") + + // ErrUnwrapFailed is returned if unwrapping a key fails. + ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") + + // NB: the AES NewCipher call only fails if the key is an invalid length. + + // ErrInvalidKey is returned when the AES key is invalid. + ErrInvalidKey = errors.New("keywrap: invalid AES key") +) + +// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
+func Wrap(key, plainText []byte) ([]byte, error) { + if len(plainText)%8 != 0 { + return nil, ErrWrapPlaintext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(plainText) / 8 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = IV, an initial value (see 2.2.3) + for ii := 0; ii < 8; ii++ { + block[ii] = 0xA6 + } + + // - For i = 1 to n + // - Set R[i] = P[i] + intermediate := make([]byte, len(plainText)) + copy(intermediate, plainText) + + // 2) Calculate intermediate values. + for ii := 0; ii < 6; ii++ { + for jj := 0; jj < nblocks; jj++ { + // - B = AES(K, A | R[i]) + copy(block[8:], intermediate[jj*8:jj*8+8]) + c.Encrypt(block[:], block[:]) + + // - A = MSB(64, B) ^ t where t = (n*j)+1 + t := uint64(ii*nblocks + jj + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + // - R[i] = LSB(64, B) + copy(intermediate[jj*8:jj*8+8], block[8:]) + } + } + + // 3) Output results. + // - Set C[0] = A + // - For i = 1 to n + // - C[i] = R[i] + return append(block[:8], intermediate...), nil +} + +// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm. +func Unwrap(key, cipherText []byte) ([]byte, error) { + if len(cipherText)%8 != 0 { + return nil, ErrUnwrapCiphertext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(cipherText)/8 - 1 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = C[0] + copy(block[:8], cipherText[:8]) + + // - For i = 1 to n + // - Set R[i] = C[i] + intermediate := make([]byte, len(cipherText)-8) + copy(intermediate, cipherText[8:]) + + // 2) Compute intermediate values. + for jj := 5; jj >= 0; jj-- { + for ii := nblocks - 1; ii >= 0; ii-- { + // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1 + // - A = MSB(64, B) + t := uint64(jj*nblocks + ii + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + copy(block[8:], intermediate[ii*8:ii*8+8]) + c.Decrypt(block[:], block[:]) + + // - R[i] = LSB(B, 64) + copy(intermediate[ii*8:ii*8+8], block[8:]) + } + } + + // 3) Output results. + // - If A is an appropriate initial value (see 2.2.3), + for ii := 0; ii < 8; ii++ { + if block[ii] != 0xA6 { + return nil, ErrUnwrapFailed + } + } + + // - For i = 1 to n + // - P[i] = R[i] + return intermediate, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go new file mode 100644 index 00000000000..3b357e5851b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -0,0 +1,224 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. 
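Wrap and Unwrap above follow RFC 3394 exactly as their step comments describe (A initialized to 0xA6 bytes, six passes over the 64-bit blocks). A small roundtrip sketch using the exported API; the KEK and the key being protected are arbitrary placeholders:

package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
)

func main() {
	kek := bytes.Repeat([]byte{0x01}, 16) // placeholder AES-128 key-encryption key
	cek := bytes.Repeat([]byte{0x02}, 24) // placeholder key to protect; must be a multiple of 8 bytes

	wrapped, err := keywrap.Wrap(kek, cek)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(wrapped)) // 32: RFC 3394 prepends one 8-byte integrity block

	unwrapped, err := keywrap.Unwrap(kek, wrapped)
	fmt.Println(err == nil && bytes.Equal(unwrapped, cek)) // true
}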
+// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
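The armor reader above checks the trailing '=' line against a CRC-24 driven by crc24Init and crc24Poly. Since crc24 is unexported, this standalone mirror restates it with the same RFC 4880 section 6.1 parameters purely to show how those three checksum bytes are derived; the sample input is arbitrary:

package main

import "fmt"

// Local mirror of the unexported crc24 above, using the same RFC 4880
// section 6.1 parameters (init 0xB704CE, polynomial 0x1864CFB).
func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= 0x1864cfb
			}
		}
	}
	return crc
}

func main() {
	// The 24-bit result is what appears, base64-encoded, on the trailing '=' line.
	sum := crc24(0xb704ce, []byte("arbitrary sample input")) & 0xffffff
	fmt.Printf("%06X\n", sum)
}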
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go new file mode 100644 index 00000000000..6f07582c37c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. 
+// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go new file mode 100644 index 00000000000..a94f6150c4b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "hash" + "io" +) + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { + start := 0 + for i, c := range buf { + switch *s { + case 0: + if c == '\r' { + *s = 1 + } else if c == '\n' { + cw.Write(buf[start:i]) + cw.Write(newline) + start = i + 1 + } + case 1: + *s = 0 + } + } + + cw.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + return writeCanonical(cth.h, buf, &cth.s) +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 00000000000..0b49be4bf37 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,165 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
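Encode and Decode above together give a streaming armor writer and reader. A minimal roundtrip sketch using only the exported API shown in this patch; the block type string and the Comment header value are arbitrary choices:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Encode returns a WriteCloser; the CRC-24 line and trailer are emitted on Close.
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello, armor")); err != nil {
		panic(err)
	}
	w.Close()

	// Decode finds the block, exposes its headers, and streams the decoded body.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(block.Body)
	fmt.Println(block.Type, block.Header["Comment"], string(body)) // PGP MESSAGE example hello, armor
}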
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. +package ecdh + +import ( + "bytes" + "crypto/elliptic" + "errors" + "io" + "math/big" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" +) + +type KDF struct { + Hash algorithm.Hash + Cipher algorithm.Cipher +} + +type PublicKey struct { + ecc.CurveType + elliptic.Curve + X, Y *big.Int + KDF +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func GenerateKey(c elliptic.Curve, kdf KDF, rand io.Reader) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.Curve = c + priv.PublicKey.KDF = kdf + priv.D, priv.PublicKey.X, priv.PublicKey.Y, err = elliptic.GenerateKey(c, rand) + return +} + +func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + if len(msg) > 40 { + return nil, nil, errors.New("ecdh: message too long") + } + // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, + // AES-192, and AES-256, respectively, to provide the same number of + // octets, 40 total, as an input to the key wrapping method. + padding := make([]byte, 40-len(msg)) + for i := range padding { + padding[i] = byte(40 - len(msg)) + } + m := append(msg, padding...) + + if pub.CurveType == ecc.Curve25519 { + return X25519Encrypt(random, pub, m, curveOID, fingerprint) + } + + d, x, y, err := elliptic.GenerateKey(pub.Curve, random) + if err != nil { + return nil, nil, err + } + + vsG = elliptic.Marshal(pub.Curve, x, y) + zbBig, _ := pub.Curve.ScalarMult(pub.X, pub.Y, d) + + byteLen := (pub.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, m); err != nil { + return nil, nil, err + } + + return vsG, c, nil + +} + +func Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { + if priv.PublicKey.CurveType == ecc.Curve25519 { + return X25519Decrypt(priv, vsG, m, curveOID, fingerprint) + } + x, y := elliptic.Unmarshal(priv.Curve, vsG) + zbBig, _ := priv.Curve.ScalarMult(x, y, priv.D) + + byteLen := (priv.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + z, err := buildKey(&priv.PublicKey, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, err + } + + c, err := keywrap.Unwrap(z, m) + if err != nil { + return nil, err + } + + return c[:len(c)-int(c[len(c)-1])], nil +} + +func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { + // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03 + // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap + // || "Anonymous Sender " || recipient_fingerprint; + param := new(bytes.Buffer) + if _, err := param.Write(curveOID); err != nil { + return nil, err + } + algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()} + if _, err := param.Write(algKDF); err != nil { + return nil, err + } + if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { + return nil, err + } + // For v5 keys, the 20 leftmost 
octets of the fingerprint are used. + if _, err := param.Write(fingerprint[:20]); err != nil { + return nil, err + } + if param.Len() - len(curveOID) != 45 { + return nil, errors.New("ecdh: malformed KDF Param") + } + + // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); + h := pub.KDF.Hash.New() + if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil { + return nil, err + } + zbLen := len(zb) + i := 0 + j := zbLen - 1 + if stripLeading { + // Work around old go crypto bug where the leading zeros are missing. + for ; i < zbLen && zb[i] == 0; i++ {} + } + if stripTrailing { + // Work around old OpenPGP.js bug where insignificant trailing zeros in + // this little-endian number are missing. + // (See https://github.com/openpgpjs/openpgpjs/pull/853.) + for ; j >= 0 && zb[j] == 0; j-- {} + } + if _, err := h.Write(zb[i:j+1]); err != nil { + return nil, err + } + if _, err := h.Write(param.Bytes()); err != nil { + return nil, err + } + mb := h.Sum(nil) + + return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. + +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go new file mode 100644 index 00000000000..15b41a31dff --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go @@ -0,0 +1,157 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. +package ecdh + +import ( + "errors" + "io" + "math/big" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "golang.org/x/crypto/curve25519" +) + +// Generates a private-public key-pair. +// 'priv' is a private key; a scalar belonging to the set +// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the +// curve. 'pub' is simply 'priv' * G where G is the base point. +// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. +func x25519GenerateKeyPairBytes(rand io.Reader) (priv [32]byte, pub [32]byte, err error) { + var n, helper = new(big.Int), new(big.Int) + n.SetUint64(1) + n.Lsh(n, 252) + helper.SetString("27742317777372353535851937790883648493", 10) + n.Add(n, helper) + + for true { + _, err = io.ReadFull(rand, priv[:]) + if err != nil { + return + } + // The following ensures that the private key is a number of the form + // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of + // of the curve. + priv[0] &= 248 + priv[31] &= 127 + priv[31] |= 64 + + // If the scalar is out of range, sample another random number. + if new(big.Int).SetBytes(priv[:]).Cmp(n) >= 0 { + continue + } + + curve25519.ScalarBaseMult(&pub, &priv) + return + } + return +} + +// X25519GenerateKey samples the key pair according to the correct distribution. +// It also sets the given key-derivation function and returns the *PrivateKey +// object along with an error. 
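// The fixed 40-octet session-key padding used by Encrypt and stripped at the end
// of Decrypt above can be summarized on its own. pad40 and unpad40 are
// hypothetical helper names used only for this sketch; they are not part of the
// vendored package.

// pad40 pads a session-key payload to 40 octets as in RFC 6637, section 8:
// every padding octet carries the number of padding octets added, so the
// common payload sizes all wrap to the same 40-octet length.
func pad40(msg []byte) []byte {
	n := byte(40 - len(msg))
	out := make([]byte, 40)
	copy(out, msg)
	for i := len(msg); i < 40; i++ {
		out[i] = n
	}
	return out
}

// unpad40 reads the final octet as the padding length, mirroring the
// c[:len(c)-int(c[len(c)-1])] step at the end of Decrypt above.
func unpad40(m []byte) []byte {
	return m[:len(m)-int(m[len(m)-1])]
}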
+func X25519GenerateKey(rand io.Reader, kdf KDF) (priv *PrivateKey, err error) { + ci := ecc.FindByName("Curve25519") + priv = new(PrivateKey) + priv.PublicKey.Curve = ci.Curve + d, pubKey, err := x25519GenerateKeyPairBytes(rand) + if err != nil { + return nil, err + } + priv.PublicKey.KDF = kdf + priv.D = make([]byte, 32) + copyReversed(priv.D, d[:]) + priv.PublicKey.CurveType = ci.CurveType + priv.PublicKey.Curve = ci.Curve + /* + * Note that ECPoint.point differs from the definition of public keys in + * [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is + * more uniform with how big integers are represented in TLS, and (2) there + * is an additional length byte (so ECpoint.point is actually 33 bytes), + * again for uniformity (and extensibility). + */ + var encodedKey = make([]byte, 33) + encodedKey[0] = 0x40 + copy(encodedKey[1:], pubKey[:]) + priv.PublicKey.X = new(big.Int).SetBytes(encodedKey[:]) + priv.PublicKey.Y = new(big.Int) + return priv, nil +} + +func X25519Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + d, ephemeralKey, err := x25519GenerateKeyPairBytes(random) + if err != nil { + return nil, nil, err + } + var pubKey [32]byte + + if pub.X.BitLen() > 33*264 { + return nil, nil, errors.New("ecdh: invalid key") + } + copy(pubKey[:], pub.X.Bytes()[1:]) + + var zb [32]byte + curve25519.ScalarBaseMult(&zb, &d) + curve25519.ScalarMult(&zb, &d, &pubKey) + z, err := buildKey(pub, zb[:], curveOID, fingerprint, false, false) + + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, msg); err != nil { + return nil, nil, err + } + + var vsg [33]byte + vsg[0] = 0x40 + copy(vsg[1:], ephemeralKey[:]) + + return vsg[:], c, nil +} + +func X25519Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { + var zb, d, ephemeralKey [32]byte + if len(vsG) != 33 || vsG[0] != 0x40 { + return nil, errors.New("ecdh: invalid key") + } + copy(ephemeralKey[:], vsG[1:33]) + + copyReversed(d[:], priv.D) + curve25519.ScalarBaseMult(&zb, &d) + curve25519.ScalarMult(&zb, &d, &ephemeralKey) + + var c []byte + + for i := 0; i < 3; i++ { + // Try buildKey three times for compat, see comments in buildKey. + z, err := buildKey(&priv.PublicKey, zb[:], curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + res, err := keywrap.Unwrap(z, m) + if i == 2 && err != nil { + // Only return an error after we've tried all variants of buildKey. + return nil, err + } + + c = res + if err == nil { + break + } + } + + return c[:len(c)-int(c[len(c)-1])], nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 00000000000..6a07d8ff279 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. 
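// The 33-octet public point layout described in the comment above (a 0x40 prefix
// followed by the 32 native curve bytes) can be captured in two small helpers;
// the names are hypothetical and exist only to make the encoding concrete.

// encodeX25519Point builds the 33-octet OpenPGP ECPoint form used above:
// one 0x40 prefix octet followed by the raw 32-byte public key.
func encodeX25519Point(pub [32]byte) []byte {
	out := make([]byte, 33)
	out[0] = 0x40
	copy(out[1:], pub[:])
	return out
}

// decodeX25519Point reverses it, rejecting anything that is not 33 bytes long
// or lacks the 0x40 prefix, as X25519Decrypt does.
func decodeX25519Point(p []byte) (pub [32]byte, ok bool) {
	if len(p) != 33 || p[0] != 0x40 {
		return pub, false
	}
	copy(pub[:], p[1:])
	return pub, true
}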
+// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + if s.ModInverse(s, priv.P) == nil { + return nil, errors.New("elgamal: invalid private key") + } + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. 
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go new file mode 100644 index 00000000000..17e2bcfed20 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -0,0 +1,116 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") +var ErrMDCMissing error = SignatureError("MDC packet not found") + +type signatureExpiredError int + +func (se signatureExpiredError) Error() string { + return "openpgp: signature expired" +} + +var ErrSignatureExpired error = signatureExpiredError(0) + +type keyExpiredError int + +func (ke keyExpiredError) Error() string { + return "openpgp: key expired" +} + +var ErrKeyExpired error = keyExpiredError(0) + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +// KeyInvalidError indicates that the public key parameters are invalid +// as they do not match the private ones +type KeyInvalidError string + +func (e KeyInvalidError) Error() string { + return "openpgp: invalid key: " + string(e) +} + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} + +// AEADError indicates that there is a problem when initializing or using a +// AEAD instance, configuration struct, nonces or index values. 
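// Since the error kinds above are distinct named types or sentinel values,
// callers usually tell them apart with a type switch or a direct comparison.
// A sketch, assuming the package is imported as pgperrors and err came from
// some OpenPGP parsing call; classify is a hypothetical helper.
package example

import (
	"log"

	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)

func classify(err error) {
	switch e := err.(type) {
	case pgperrors.UnsupportedError:
		log.Printf("valid data, but an unimplemented feature: %v", e)
	case pgperrors.StructuralError:
		log.Printf("syntactically invalid data: %v", e)
	case pgperrors.SignatureError:
		log.Printf("signature failed to validate: %v", e)
	default:
		if err == pgperrors.ErrKeyIncorrect {
			log.Print("no key available that can decrypt the message")
		}
	}
}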
+type AEADError string + +func (ae AEADError) Error() string { + return "openpgp: aead error: " + string(ae) +} + +// ErrDummyPrivateKey results when operations are attempted on a private key +// that is just a dummy key. See +// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 +type ErrDummyPrivateKey string + +func (dke ErrDummyPrivateKey) Error() string { + return "openpgp: s2k GNU dummy key: " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go new file mode 100644 index 00000000000..17a1bfe9cb5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019 ProtonTech AG + +package algorithm + +import ( + "crypto/cipher" + "github.com/ProtonMail/go-crypto/eax" + "github.com/ProtonMail/go-crypto/ocb" +) + +// AEADMode defines the Authenticated Encryption with Associated Data mode of +// operation. +type AEADMode uint8 + +// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) +const ( + AEADModeEAX = AEADMode(1) + AEADModeOCB = AEADMode(2) + AEADModeGCM = AEADMode(100) +) + +// TagLength returns the length in bytes of authentication tags. +func (mode AEADMode) TagLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 16 + case AEADModeGCM: + return 16 + default: + return 0 + } +} + +// NonceLength returns the length in bytes of nonces. +func (mode AEADMode) NonceLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 15 + case AEADModeGCM: + return 12 + default: + return 0 + } +} + +// New returns a fresh instance of the given mode +func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { + var err error + switch mode { + case AEADModeEAX: + alg, err = eax.NewEAX(block) + case AEADModeOCB: + alg, err = ocb.NewOCB(block) + case AEADModeGCM: + alg, err = cipher.NewGCM(block) + } + if err != nil { + panic(err.Error()) + } + return alg +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go new file mode 100644 index 00000000000..5760cff80ea --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + + "golang.org/x/crypto/cast5" +) + +// Cipher is an official symmetric key cipher algorithm. See RFC 4880, +// section 9.2. +type Cipher interface { + // Id returns the algorithm ID, as a byte, of the cipher. + Id() uint8 + // KeySize returns the key size, in bytes, of the cipher. + KeySize() int + // BlockSize returns the block size, in bytes, of the cipher. + BlockSize() int + // New returns a fresh instance of the given cipher. + New(key []byte) cipher.Block +} + +// The following constants mirror the OpenPGP standard (RFC 4880). +const ( + TripleDES = CipherFunction(2) + CAST5 = CipherFunction(3) + AES128 = CipherFunction(7) + AES192 = CipherFunction(8) + AES256 = CipherFunction(9) +) + +// CipherById represents the different block ciphers specified for OpenPGP. 
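// A sketch of driving AEADMode.New above with a block cipher. The algorithm
// package is internal, so code like this can only live inside the go-crypto
// module itself (for example as a test); the all-zero key and nonce are
// illustrative only, never something to use in practice.
package algorithm_test

import (
	"crypto/aes"
	"testing"

	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)

func TestAEADModeSketch(t *testing.T) {
	block, err := aes.NewCipher(make([]byte, 16)) // AES-128
	if err != nil {
		t.Fatal(err)
	}
	mode := algorithm.AEADModeGCM
	aead := mode.New(block)                   // cipher.NewGCM under the hood
	nonce := make([]byte, mode.NonceLength()) // 12 bytes for GCM
	ct := aead.Seal(nil, nonce, []byte("session data"), nil)
	if want := len("session data") + mode.TagLength(); len(ct) != want { // 16-byte tag
		t.Fatalf("ciphertext length %d, want %d", len(ct), want)
	}
}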
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +var CipherById = map[uint8]Cipher{ + TripleDES.Id(): TripleDES, + CAST5.Id(): CAST5, + AES128.Id(): AES128, + AES192.Id(): AES192, + AES256.Id(): AES256, +} + +type CipherFunction uint8 + +// ID returns the algorithm Id, as a byte, of cipher. +func (sk CipherFunction) Id() uint8 { + return uint8(sk) +} + +var keySizeByID = map[uint8]int{ + TripleDES.Id(): 24, + CAST5.Id(): cast5.KeySize, + AES128.Id(): 16, + AES192.Id(): 24, + AES256.Id(): 32, +} + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case TripleDES: + return 24 + case CAST5: + return cast5.KeySize + case AES128: + return 16 + case AES192: + return 24 + case AES256: + return 32 + } + return 0 +} + +// BlockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) BlockSize() int { + switch cipher { + case TripleDES: + return des.BlockSize + case CAST5: + return 8 + case AES128, AES192, AES256: + return 16 + } + return 0 +} + +// New returns a fresh instance of the given cipher. +func (cipher CipherFunction) New(key []byte) (block cipher.Block) { + var err error + switch cipher { + case TripleDES: + block, err = des.NewTripleDESCipher(key) + case CAST5: + block, err = cast5.NewCipher(key) + case AES128, AES192, AES256: + block, err = aes.NewCipher(key) + } + if err != nil { + panic(err.Error()) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go new file mode 100644 index 00000000000..3f1b61b88e6 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -0,0 +1,86 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto" + "fmt" + "hash" +) + +// Hash is an official hash function algorithm. See RFC 4880, section 9.4. +type Hash interface { + // Id returns the algorithm ID, as a byte, of Hash. + Id() uint8 + // Available reports whether the given hash function is linked into the binary. + Available() bool + // HashFunc simply returns the value of h so that Hash implements SignerOpts. + HashFunc() crypto.Hash + // New returns a new hash.Hash calculating the given hash function. New + // panics if the hash function is not linked into the binary. + New() hash.Hash + // Size returns the length, in bytes, of a digest resulting from the given + // hash function. It doesn't require that the hash function in question be + // linked into the program. + Size() int + // String is the name of the hash function corresponding to the given + // OpenPGP hash id. + String() string +} + +// The following vars mirror the crypto/Hash supported hash functions. +var ( + MD5 Hash = cryptoHash{1, crypto.MD5} + SHA1 Hash = cryptoHash{2, crypto.SHA1} + RIPEMD160 Hash = cryptoHash{3, crypto.RIPEMD160} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} +) + +// HashById represents the different hash functions specified for OpenPGP. 
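// In the same spirit, CipherById above maps wire-format algorithm IDs onto
// usable block ciphers. blockForId is a hypothetical in-module helper (the
// package is internal) that assumes "crypto/cipher", "fmt", and the algorithm
// package are imported; it exists only to make the lookup concrete.
func blockForId(id uint8, key []byte) (cipher.Block, error) {
	c, ok := algorithm.CipherById[id]
	if !ok {
		return nil, fmt.Errorf("unknown cipher algorithm ID %d", id)
	}
	if len(key) != c.KeySize() { // e.g. 32 bytes when id is 9 (AES-256)
		return nil, fmt.Errorf("cipher %d wants %d key bytes, got %d", id, c.KeySize(), len(key))
	}
	// BlockSize() is 16 for the AES variants, 8 for CAST5 and TripleDES.
	return c.New(key), nil
}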
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 +var ( + HashById = map[uint8]Hash{ + MD5.Id(): MD5, + SHA1.Id(): SHA1, + RIPEMD160.Id(): RIPEMD160, + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + } +) + +// cryptoHash contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +type cryptoHash struct { + id uint8 + crypto.Hash +} + +// Id returns the algorithm ID, as a byte, of cryptoHash. +func (h cryptoHash) Id() uint8 { + return h.id +} + +var hashNames = map[uint8]string{ + MD5.Id(): "MD5", + SHA1.Id(): "SHA1", + RIPEMD160.Id(): "RIPEMD160", + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", +} + +func (h cryptoHash) String() string { + s, ok := hashNames[h.id] + if !ok { + panic(fmt.Sprintf("Unsupported hash function %d", h.id)) + } + return s +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go new file mode 100644 index 00000000000..f91042fd85e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go @@ -0,0 +1,118 @@ +package ecc + +import ( + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "crypto/elliptic" + "bytes" + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" +) + +type SignatureAlgorithm uint8 + +const ( + ECDSA SignatureAlgorithm = 1 + EdDSA SignatureAlgorithm = 2 +) + +type CurveInfo struct { + Name string + Oid *encoding.OID + Curve elliptic.Curve + SigAlgorithm SignatureAlgorithm + CurveType CurveType +} + +var curves = []CurveInfo{ + { + Name: "NIST curve P-256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: elliptic.P256(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "NIST curve P-384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: elliptic.P384(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "NIST curve P-521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: elliptic.P521(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: bitcurves.S256(), + CurveType: BitCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: elliptic.P256(),// filler + CurveType: Curve25519, + SigAlgorithm: ECDSA, + }, + { + Name: "Ed25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: elliptic.P256(), // filler + CurveType: NISTCurve, + SigAlgorithm: EdDSA, + }, + { + Name: "Brainpool P256r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: brainpool.P256r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "BrainpoolP384r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: brainpool.P384r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "BrainpoolP512r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: brainpool.P512r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, +} + +func FindByCurve(curve elliptic.Curve) *CurveInfo { + for _, 
curveInfo := range curves { + if curveInfo.Curve == curve { + return &curveInfo + } + } + return nil +} + +func FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindByName(name string) *CurveInfo { + for _, curveInfo := range curves { + if curveInfo.Name == name { + return &curveInfo + } + } + return nil +} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go new file mode 100644 index 00000000000..de8bca0ac23 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go @@ -0,0 +1,10 @@ +package ecc + +type CurveType uint8 + +const ( + NISTCurve CurveType = 1 + Curve25519 CurveType = 2 + BitCurve CurveType = 3 + BrainpoolCurve CurveType = 4 +) \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go new file mode 100644 index 00000000000..6c921481b7b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding implements openpgp packet field encodings as specified in +// RFC 4880 and 6637. +package encoding + +import "io" + +// Field is an encoded field of an openpgp packet. +type Field interface { + // Bytes returns the decoded data. + Bytes() []byte + + // BitLength is the size in bits of the decoded data. + BitLength() uint16 + + // EncodedBytes returns the encoded data. + EncodedBytes() []byte + + // EncodedLength is the size in bytes of the encoded data. + EncodedLength() uint16 + + // ReadFrom reads the next Field from r. + ReadFrom(r io.Reader) (int64, error) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go new file mode 100644 index 00000000000..02e5e695c38 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + "math/big" + "math/bits" +) + +// An MPI is used to store the contents of a big integer, along with the bit +// length that was specified in the original input. This allows the MPI to be +// reserialized exactly. +type MPI struct { + bytes []byte + bitLength uint16 +} + +// NewMPI returns a MPI initialized with bytes. +func NewMPI(bytes []byte) *MPI { + for len(bytes) != 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + bitLength := uint16(0) + return &MPI{bytes, bitLength} + } + bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) + return &MPI{bytes, bitLength} +} + +// Bytes returns the decoded data. +func (m *MPI) Bytes() []byte { + return m.bytes +} + +// BitLength is the size in bits of the decoded data. +func (m *MPI) BitLength() uint16 { + return m.bitLength +} + +// EncodedBytes returns the encoded data. 
+func (m *MPI) EncodedBytes() []byte { + return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...) +} + +// EncodedLength is the size in bytes of the encoded data. +func (m *MPI) EncodedLength() uint16 { + return uint16(2 + len(m.bytes)) +} + +// ReadFrom reads into m the next MPI from r. +func (m *MPI) ReadFrom(r io.Reader) (int64, error) { + var buf [2]byte + n, err := io.ReadFull(r, buf[0:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + m.bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + m.bytes = make([]byte, (int(m.bitLength)+7)/8) + + nn, err := io.ReadFull(r, m.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + // remove leading zero bytes from malformed GnuPG encoded MPIs: + // https://bugs.gnupg.org/gnupg/issue1853 + // for _, b := range m.bytes { + // if b != 0 { + // break + // } + // m.bytes = m.bytes[1:] + // m.bitLength -= 8 + // } + + return int64(n) + int64(nn), err +} + +// SetBig initializes m with the bits from n. +func (m *MPI) SetBig(n *big.Int) *MPI { + m.bytes = n.Bytes() + m.bitLength = uint16(n.BitLen()) + return m +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go new file mode 100644 index 00000000000..ee39fd6bbb1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go @@ -0,0 +1,88 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OID is used to store a variable-length field with a one-octet size +// prefix. See https://tools.ietf.org/html/rfc6637#section-9. +type OID struct { + bytes []byte +} + +const ( + // maxOID is the maximum number of bytes in a OID. + maxOID = 254 + // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC + // specifies are reserved. + reservedOIDLength1 = 0 + reservedOIDLength2 = 0xff +) + +// NewOID returns a OID initialized with bytes. +func NewOID(bytes []byte) *OID { + switch len(bytes) { + case reservedOIDLength1, reservedOIDLength2: + panic("encoding: NewOID argument length is reserved") + default: + if len(bytes) > maxOID { + panic("encoding: NewOID argment too large") + } + } + + return &OID{ + bytes: bytes, + } +} + +// Bytes returns the decoded data. +func (o *OID) Bytes() []byte { + return o.bytes +} + +// BitLength is the size in bits of the decoded data. +func (o *OID) BitLength() uint16 { + return uint16(len(o.bytes) * 8) +} + +// EncodedBytes returns the encoded data. +func (o *OID) EncodedBytes() []byte { + return append([]byte{byte(len(o.bytes))}, o.bytes...) +} + +// EncodedLength is the size in bytes of the encoded data. +func (o *OID) EncodedLength() uint16 { + return uint16(1 + len(o.bytes)) +} + +// ReadFrom reads into b the next OID from r. 
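// To make the MPI wire format above concrete: the two-octet prefix is the bit
// length of the integer, followed by its big-endian bytes with leading zero
// octets stripped. A tiny round-trip sketch, in-module only since the encoding
// package is internal.
package encoding_test

import (
	"bytes"
	"math/big"
	"testing"

	"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
)

func TestMPIRoundTrip(t *testing.T) {
	m := encoding.NewMPI(big.NewInt(511).Bytes()) // 511 = 0x01FF, so bit length 9
	enc := m.EncodedBytes()                       // 00 09 01 FF: bit length, then magnitude
	var back encoding.MPI
	if _, err := back.ReadFrom(bytes.NewReader(enc)); err != nil {
		t.Fatal(err)
	}
	if back.BitLength() != 9 || !bytes.Equal(back.Bytes(), []byte{0x01, 0xff}) {
		t.Fatalf("round trip mismatch: %d %x", back.BitLength(), back.Bytes())
	}
}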
+func (o *OID) ReadFrom(r io.Reader) (int64, error) { + var buf [1]byte + n, err := io.ReadFull(r, buf[:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + switch buf[0] { + case reservedOIDLength1, reservedOIDLength2: + return int64(n), errors.UnsupportedError("reserved for future extensions") + } + + o.bytes = make([]byte, buf[0]) + + nn, err := io.ReadFull(r, o.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + return int64(n) + int64(nn), err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go new file mode 100644 index 00000000000..0952d8c7392 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -0,0 +1,384 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + goerrors "errors" + "io" + "math/big" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "golang.org/x/crypto/ed25519" +) + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + + // Generate a primary signing key + primaryPrivRaw, err := newSigner(config) + if err != nil { + return nil, err + } + primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) + if config != nil && config.V5Keys { + primary.UpgradeToV5() + } + + isPrimaryId := true + selfSignature := &packet.Signature{ + Version: primary.PublicKey.Version, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: primary.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + IssuerKeyId: &primary.PublicKey.KeyId, + IssuerFingerprint: primary.PublicKey.Fingerprint, + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + MDC: true, // true by default, see 5.8 vs. 5.14 + AEAD: config.AEAD() != nil, + V5Keys: config != nil && config.V5Keys, + } + + // Set the PreferredHash for the SelfSignature from the packet.Config. + // If it is not the must-implement algorithm from rfc4880bis, append that. + selfSignature.PreferredHash = []uint8{hashToHashId(config.Hash())} + if config.Hash() != crypto.SHA256 { + selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) + } + + // Likewise for DefaultCipher. 
+ selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} + if config.Cipher() != packet.CipherAES128 { + selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) + } + + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + + // And for DefaultMode. + selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeEAX { + selfSignature.PreferredAEAD = append(selfSignature.PreferredAEAD, uint8(packet.AEADModeEAX)) + } + + // User ID binding signature + err = selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) + if err != nil { + return nil, err + } + + // Generate an encryption subkey + subPrivRaw, err := newDecrypter(config) + if err != nil { + return nil, err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + sub.PublicKey.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } + + // NOTE: No KeyLifetimeSecs here, but we will not return this subkey in EncryptionKey() + // if the primary/master key has expired. + subKey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: primary.PublicKey.Version, + CreationTime: creationTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: primary.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &primary.PublicKey.KeyId, + }, + } + + // Subkey binding signature + err = subKey.Sig.SignKey(subKey.PublicKey, primary, config) + if err != nil { + return nil, err + } + + return &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: map[string]*Identity{ + uid.Id: &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: selfSignature, + Signatures: []*packet.Signature{selfSignature}, + }, + }, + Subkeys: []Subkey{subKey}, + }, nil +} + +// AddSigningSubkey adds a signing keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. 
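// A minimal sketch of calling NewEntity above; per its documentation a nil
// config selects sensible defaults (a fresh RSA/RSA key pair). The name,
// comment, and email are illustrative.
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Alice Example", "demo", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	// The result carries a signing primary key, one self-signed identity,
	// and one encryption subkey, exactly as assembled above.
	fmt.Printf("primary key ID %016X, %d subkey(s)\n", e.PrimaryKey.KeyId, len(e.Subkeys))
}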
+func (e *Entity) AddSigningSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newSigner(config) + if err != nil { + return err + } + sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagSign: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + EmbeddedSignature: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + SigType: packet.SigTypePrimaryKeyBinding, + PubKeyAlgo: sub.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + }, + } + if config != nil && config.V5Keys { + subkey.PublicKey.UpgradeToV5() + } + + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) + if err != nil { + return err + } + + subkey.PublicKey.IsSubkey = true + subkey.PrivateKey.IsSubkey = true + if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. +func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newDecrypter(config) + if err != nil { + return err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + if config != nil && config.V5Keys { + subkey.PublicKey.UpgradeToV5() + } + + subkey.PublicKey.IsSubkey = true + subkey.PrivateKey.IsSubkey = true + if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// Generates a signing key +func newSigner(config *packet.Config) (signer crypto.Signer, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + _, priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return &priv, nil + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +// Generates an encryption/decryption key +func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return 
nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + fallthrough // When passing EdDSA, we generate an ECDH subkey + case packet.PubKeyAlgoECDH: + var kdf = ecdh.KDF{ + Hash: algorithm.SHA512, + Cipher: algorithm.AES256, + } + return ecdh.X25519GenerateKey(config.Random(), kdf) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +var bigOne = big.NewInt(1) + +// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +// given bit size, using the given random source and prepopulated primes. +func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { + priv := new(rsa.PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") + } + + if bits < 1024 { + return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). + // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + var err error + if len(prepopulatedPrimes) == 0 { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + } else { + primes[i] = prepopulatedPrimes[0] + prepopulatedPrimes = prepopulatedPrimes[1:] + } + + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. + continue NextSetOfPrimes + } + + priv.D = new(big.Int) + e := big.NewInt(int64(priv.E)) + ok := priv.D.ModInverse(e, totient) + + if ok != nil { + priv.Primes = primes + priv.N = n + break + } + } + + priv.Precompute() + return priv, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go new file mode 100644 index 00000000000..cef3613b611 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -0,0 +1,789 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package openpgp + +import ( + goerrors "errors" + "io" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature + Revocations []*packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order. 
+func (e *Entity) PrimaryIdentity() *Identity { + var primaryIdentity *Identity + for _, ident := range e.Identities { + if shouldPreferIdentity(primaryIdentity, ident) { + primaryIdentity = ident + } + } + return primaryIdentity +} + +func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { + if (existingId == nil) { + return true + } + + if (len(existingId.Revocations) > len(potentialNewId.Revocations)) { + return true + } + + if (len(existingId.Revocations) < len(potentialNewId.Revocations)) { + return false + } + + if (existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId && + !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId)) { + return false + } + + if (!(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) && + potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) { + return true + } + + return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime) +} + +// EncryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { + // Fail to find any encryption key if the... + i := e.PrimaryIdentity() + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked + return Key{}, false + } + + // Iterate the keys to find the newest, unexpired one + candidateSubkey := -1 + var maxTime time.Time + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagEncryptCommunications && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt with, then we can obviously use it. + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true + } + + return Key{}, false +} + +// SigningKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) SigningKey(now time.Time) (Key, bool) { + return e.SigningKeyById(now, 0) +} + +// SigningKeyById return the Key for signing a message with this +// Entity and keyID. +func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { + // Fail to find any signing key if the... 
+ i := e.PrimaryIdentity() + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked + return Key{}, false + } + + // Iterate the keys to find the newest, unexpired one + candidateSubkey := -1 + var maxTime time.Time + for idx, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagSign && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.PublicKey.KeyExpired(subkey.Sig, now) && + !subkey.Sig.SigExpired(now) && + !subkey.Revoked(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && + (id == 0 || subkey.PublicKey.KeyId == id) { + candidateSubkey = idx + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. Or, if the primary key is marked as ok to + // sign with, then we can use it. + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && + e.PrimaryKey.PubKeyAlgo.CanSign() && + (id == 0 || e.PrimaryKey.KeyId == id) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true + } + + // No keys with a valid Signing Flag or no keys matched the id passed in + return Key{}, false +} + +func revoked(revocations []*packet.Signature, now time.Time) bool { + for _, revocation := range revocations { + if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised { + // If the key is compromised, the key is considered revoked even before the revocation date. + return true + } + if !revocation.SigExpired(now) { + return true + } + } + return false +} + +// Revoked returns whether the entity has any direct key revocation signatures. +// Note that third-party revocation signatures are not supported. +// Note also that Identity and Subkey revocation should be checked separately. +func (e *Entity) Revoked(now time.Time) bool { + return revoked(e.Revocations, now) +} + +// Revoked returns whether the identity has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (i *Identity) Revoked(now time.Time) bool { + return revoked(i.Revocations, now) +} + +// Revoked returns whether the subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +func (s *Subkey) Revoked(now time.Time) bool { + return revoked(s.Revocations, now) +} + +// Revoked returns whether the key or subkey has been revoked by a self-signature. +// Note that third-party revocation signatures are not supported. +// Note also that Identity revocation should be checked separately. +// Normally, it's not necessary to call this function, except on keys returned by +// KeysById or KeysByIdUsage. +func (key *Key) Revoked(now time.Time) bool { + return revoked(key.Revocations, now) +} + +// An EntityList contains one or more Entities. +type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id. 
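// A sketch of the intended call pattern for the selection helpers above; both
// return the newest unexpired, unrevoked candidate (or fall back to the primary
// key) together with a boolean. pickKeys is a hypothetical function, entity an
// *openpgp.Entity obtained elsewhere, and "time" is assumed to be imported.
func pickKeys(entity *openpgp.Entity) {
	now := time.Now()
	if encKey, ok := entity.EncryptionKey(now); ok {
		// encKey.PublicKey is the newest valid encryption-capable key.
		_ = encKey
	}
	if sigKey, ok := entity.SigningKey(now); ok && sigKey.PrivateKey != nil {
		// sigKey.PrivateKey can sign for this entity (decrypt it first if it
		// is passphrase-protected).
		_ = sigKey
	}
}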
+func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + ident := e.PrimaryIdentity() + selfSig := ident.SelfSignature + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) + } + } + } + return +} + +// KeysByIdAndUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. +func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if key.SelfSignature.FlagsValid && requiredUsage != 0 { + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. 
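// Reading keys back in with ReadArmoredKeyRing above; the file name is made up
// for the sketch, and the armored block may hold public or private keys.
package main

import (
	"fmt"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	f, err := os.Open("keyring.asc") // hypothetical armored keyring file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}
	for _, e := range el {
		fmt.Printf("%X  %s\n", e.PrimaryKey.Fingerprint, e.PrimaryIdentity().Name)
	}
}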
+func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. 
+ identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + identity.SelfSignature = sig + } + identity.Signatures = append(identity.Signatures, sig) + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Revocations = append(subKey.Revocations, sig) + case packet.SigTypeSubkeyBinding: + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntry. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + if e.PrivateKey.Dummy() { + return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities") + } + return e.serializePrivate(w, config, true) +} + +// SerializePrivateWithoutSigning serializes an Entity, including private key +// material, but excluding signatures from other entities, to the given Writer. +// Self-signatures of identities and subkeys are not re-signed. This is useful +// when serializing GNU dummy keys, among other things. +// If config is nil, sensible defaults will be used. 
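A minimal sketch of how SerializePrivate might be used to write a binary secret keyring. It assumes every entity's primary private key is present and already decrypted, since identities and subkeys are re-signed before being written; the package and helper names are hypothetical.

```go
package keyringutil // hypothetical helper package

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// ExportSecretKeyring writes the private parts of the given entities to w as a
// binary (unarmored) keyring. Each entity's primary private key must be
// present and decrypted, because SerializePrivate re-signs identities and
// subkeys before serializing them.
func ExportSecretKeyring(w io.Writer, entities openpgp.EntityList) error {
	for _, e := range entities {
		if err := e.SerializePrivate(w, nil); err != nil {
			return err
		}
	}
	return nil
}
```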
+func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { + return e.serializePrivate(w, config, false) +} + +func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { + if e.PrivateKey == nil { + return goerrors.New("openpgp: private key is missing") + } + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if reSign { + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + for _, revocation := range ident.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if reSign { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + if subkey.Sig.EmbeddedSignature != nil { + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, + subkey.PrivateKey, config) + if err != nil { + return + } + } + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. 
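For the public half, Serialize pairs naturally with the sibling armor package. The sketch below assumes armor.Encode takes a writer, a block type, and optional headers (mirroring the armor.Decode call used in ReadArmoredKeyRing); the helper name is hypothetical.

```go
package keyringutil // hypothetical helper package

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

// ExportArmoredPublicKey writes the public portion of e, including
// third-party certifications, as an ASCII-armored public key block.
func ExportArmoredPublicKey(w io.Writer, e *openpgp.Entity) error {
	aw, err := armor.Encode(w, openpgp.PublicKeyType, nil)
	if err != nil {
		return err
	}
	if err := e.Serialize(aw); err != nil {
		aw.Close()
		return err
	}
	return aw.Close()
}
```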
+func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + Version: signer.PrivateKey.Version, + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} + +// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the +// specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. +func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + revSig := &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: config.Now(), + SigType: packet.SigTypeKeyRevocation, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + RevocationReason: &reason, + RevocationReasonText: reasonText, + IssuerKeyId: &e.PrimaryKey.KeyId, + } + + if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { + return err + } + e.Revocations = append(e.Revocations, revSig) + return nil +} + +// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for +// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. 
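A sketch of revoking a primary key and re-exporting it. The packet.KeyCompromised reason constant and the helper name are assumptions, and the entity's private key must already be decrypted so the revocation signature can be produced; a nil config selects the defaults.

```go
package keyringutil // hypothetical helper package

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// RevokeAndExport appends a key-revocation signature to e and writes the
// updated public key to w. packet.KeyCompromised is assumed to be the
// revocation-reason constant for a compromised key.
func RevokeAndExport(w io.Writer, e *openpgp.Entity) error {
	if err := e.RevokeKey(packet.KeyCompromised, "private key material was exposed", nil); err != nil {
		return err
	}
	// Serialize emits the revocation signatures recorded in e.Revocations
	// directly after the primary key packet.
	return e.Serialize(w)
}
```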
+func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { + return errors.InvalidArgumentError("given subkey is not associated with this key") + } + + revSig := &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: config.Now(), + SigType: packet.SigTypeSubkeyRevocation, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + RevocationReason: &reason, + RevocationReasonText: reasonText, + IssuerKeyId: &e.PrimaryKey.KeyId, + } + + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + sk.Revocations = append(sk.Revocations, revSig) + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go new file mode 100644 index 00000000000..a2219e64d1c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -0,0 +1,400 @@ +package openpgp + +const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756
705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0e
a2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" + +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + 
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` + 
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e +DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ +uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW +ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx +nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ +x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg +PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy +9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ +1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 +depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl +aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 +DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa +XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU +8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 +b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD +BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG +0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N +s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb +tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 +BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE +/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 +kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z +VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa +PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ +snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi +bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 +K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X +8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ +TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb +OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l +QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V +yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U +heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB +7qTZOahrETw= +=IKnw +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW 
+IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb +YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR +s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL +EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ +lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 +2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 +69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 +Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b +wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW +FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR +AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM +vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx +22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj +7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY +AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 +tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX +JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl +mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 +8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC 
+0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b +4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl +Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY +=3fWu +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L +plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz +r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ +sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ +3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm +k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s +GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G +HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT +CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR +AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN +MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j +UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS +YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs +nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ +U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es +HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK +lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg +AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV +UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM +0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX +12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa +3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi +wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 +Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G +1dh1WoUCyREZsJQg2YoIpWIcvw+a +=bNRo +-----END PGP PUBLIC KEY BLOCK----- +` + +const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1 + +lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 +GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 +I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB +/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl +rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ +6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD +ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX +M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED +Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f +yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk +UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH +e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl +AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r +oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo +UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS +YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ +kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC +WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW +n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 
+/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 +R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE +VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix +7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== +=F/T0 +-----END PGP PRIVATE KEY BLOCK-----` + +const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii +B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS +PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 +lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA +aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD +Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v +Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S +rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH +AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA +iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ +Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ +AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou +b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== +=KLN8 +-----END PGP PRIVATE KEY BLOCK-----` + +const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd +oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk +/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ +gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r +njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU +iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK +Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m +kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR +MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI +zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A +rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP +CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr +MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c +bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd +hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ +3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS +ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL +Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 +Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz +YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r +sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV +JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 +EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt +Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw +vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ +iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg +UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y +HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL +Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy +bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP +Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic +BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 +v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV 
+oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c +wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R +fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa +4g== +=XZm8 +-----END PGP PRIVATE KEY BLOCK-----` + +// https://tests.sequoia-pgp.org/#Certificate_expiration +// P _ U p +const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go new file mode 100644 index 00000000000..7350974effc --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import "math/bits" + +// AEADConfig collects a number of AEAD parameters along with sensible defaults. +// A nil AEADConfig is valid and results in all default values. 
+type AEADConfig struct { + // The AEAD mode of operation. + DefaultMode AEADMode + // Amount of octets in each chunk of data + ChunkSize uint64 +} + +// Mode returns the AEAD mode of operation. +func (conf *AEADConfig) Mode() AEADMode { + if conf == nil || conf.DefaultMode == 0 { + return AEADModeEAX + } + mode := conf.DefaultMode + if mode != AEADModeEAX && mode != AEADModeOCB && + mode != AEADModeExperimentalGCM { + panic("AEAD mode unsupported") + } + return mode +} + +// ChunkSizeByte returns the byte indicating the chunk size. The effective +// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6) +func (conf *AEADConfig) ChunkSizeByte() byte { + if conf == nil || conf.ChunkSize == 0 { + return 12 // 1 << (12 + 6) == 262144 bytes + } + + chunkSize := conf.ChunkSize + exponent := bits.Len64(chunkSize) - 1 + switch { + case exponent < 6: + exponent = 6 + case exponent > 27: + exponent = 27 + } + + return byte(exponent - 6) +} + +// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the +// maximum returned value is 1 << 30. +func decodeAEADChunkSize(c byte) int { + size := uint64(1 << (c + 6)) + if size != uint64(int(size)) { + return 1 << 30 + } + return int(size) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go new file mode 100644 index 00000000000..85c53f08aa5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -0,0 +1,364 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import ( + "bytes" + "crypto/cipher" + "crypto/rand" + "encoding/binary" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// AEADEncrypted represents an AEAD Encrypted Packet (tag 20, RFC4880bis-5.16). +type AEADEncrypted struct { + cipher CipherFunction + mode AEADMode + chunkSizeByte byte + Contents io.Reader // Encrypted chunks and tags + initialNonce []byte // Referred to as IV in RFC4880-bis +} + +// Only currently defined version +const aeadEncryptedVersion = 1 + +// An AEAD opener/sealer, its configuration, and data for en/decryption. +type aeadCrypter struct { + aead cipher.AEAD + chunkSize int + initialNonce []byte + associatedData []byte // Chunk-independent associated data + chunkIndex []byte // Chunk counter + bytesProcessed int // Amount of plaintext bytes encrypted/decrypted + buffer bytes.Buffer // Buffered bytes accross chunks +} + +// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according +// to the AEAD block size, and buffers the extra encrypted bytes for next write. +type aeadEncrypter struct { + aeadCrypter // Embedded plaintext sealer + writer io.WriteCloser // 'writer' is a partialLengthWriter +} + +// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when +// necessary, similar to aeadEncrypter. 
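In the AEADConfig above, the chunk-size byte encodes an effective chunk length of 1 << (byte + 6), clamped to the range 2^6 .. 2^27 bytes, with byte value 12 (256 KiB) as the default. A small sketch of that round trip using only the exported AEADConfig methods:

```go
package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Default configuration: a nil AEADConfig is valid and yields chunk-size
	// byte 12, i.e. an effective chunk size of 1<<(12+6) = 262144 bytes.
	var def *packet.AEADConfig
	fmt.Println(def.ChunkSizeByte()) // 12

	// A requested 64 KiB chunk size encodes as byte 10, which decodes back
	// to 1<<(10+6) = 65536 bytes.
	conf := &packet.AEADConfig{ChunkSize: 1 << 16}
	fmt.Println(conf.ChunkSizeByte()) // 10
}
```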
+type aeadDecrypter struct { + aeadCrypter // Embedded ciphertext opener + reader io.Reader // 'reader' is a partialLengthReader + peekedBytes []byte // Used to detect last chunk + eof bool +} + +func (ae *AEADEncrypted) parse(buf io.Reader) error { + headerData := make([]byte, 4) + if n, err := io.ReadFull(buf, headerData); n < 4 { + return errors.AEADError("could not read aead header:" + err.Error()) + } + // Read initial nonce + mode := AEADMode(headerData[2]) + nonceLen := mode.NonceLength() + if nonceLen == 0 { + return errors.AEADError("unknown mode") + } + initialNonce := make([]byte, nonceLen) + if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { + return errors.AEADError("could not read aead nonce:" + err.Error()) + } + ae.Contents = buf + ae.initialNonce = initialNonce + c := headerData[1] + if _, ok := algorithm.CipherById[c]; !ok { + return errors.UnsupportedError("unknown cipher: " + string(c)) + } + ae.cipher = CipherFunction(c) + ae.mode = mode + ae.chunkSizeByte = byte(headerData[3]) + return nil +} + +// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or +// an error. +func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) { + return ae.decrypt(key) +} + +// decrypt prepares an aeadCrypter and returns a ReadCloser from which +// decrypted bytes can be read (see aeadDecrypter.Read()). +func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { + blockCipher := ae.cipher.new(key) + aead := ae.mode.new(blockCipher) + // Carry the first tagLen bytes + tagLen := ae.mode.TagLength() + peekedBytes := make([]byte, tagLen) + n, err := io.ReadFull(ae.Contents, peekedBytes) + if n < tagLen || (err != nil && err != io.EOF) { + return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) + } + chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) + return &aeadDecrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: chunkSize, + initialNonce: ae.initialNonce, + associatedData: ae.associatedData(), + chunkIndex: make([]byte, 8), + }, + reader: ae.Contents, + peekedBytes: peekedBytes}, nil +} + +// Read decrypts bytes and reads them into dst. It decrypts when necessary and +// buffers extra decrypted bytes. It returns the number of bytes copied into dst +// and an error. +func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { + // Return buffered plaintext bytes from previous calls + if ar.buffer.Len() > 0 { + return ar.buffer.Read(dst) + } + + // Return EOF if we've previously validated the final tag + if ar.eof { + return 0, io.EOF + } + + // Read a chunk + tagLen := ar.aead.Overhead() + cipherChunkBuf := new(bytes.Buffer) + _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen)) + cipherChunk := cipherChunkBuf.Bytes() + if errRead != nil && errRead != io.EOF { + return 0, errRead + } + decrypted, errChunk := ar.openChunk(cipherChunk) + if errChunk != nil { + return 0, errChunk + } + + // Return decrypted bytes, buffering if necessary + if len(dst) < len(decrypted) { + n = copy(dst, decrypted[:len(dst)]) + ar.buffer.Write(decrypted[len(dst):]) + } else { + n = copy(dst, decrypted) + } + + // Check final authentication tag + if errRead == io.EOF { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return n, errChunk + } + ar.eof = true // Mark EOF for when we've returned all buffered data + } + return +} + +// Close is noOp. The final authentication tag of the stream was already +// checked in the last Read call. 
In the future, this function could be used to +// wipe the reader and peeked, decrypted bytes, if necessary. +func (ar *aeadDecrypter) Close() (err error) { + return nil +} + +// SerializeAEADEncrypted initializes the aeadCrypter and returns a writer. +// This writer encrypts and writes bytes (see aeadEncrypter.Write()). +func SerializeAEADEncrypted(w io.Writer, key []byte, cipher CipherFunction, mode AEADMode, config *Config) (io.WriteCloser, error) { + writeCloser := noOpCloser{w} + writer, err := serializeStreamHeader(writeCloser, packetTypeAEADEncrypted) + if err != nil { + return nil, err + } + + // Data for en/decryption: tag, version, cipher, aead mode, chunk size + aeadConf := config.AEAD() + prefix := []byte{ + 0xD4, + aeadEncryptedVersion, + byte(config.Cipher()), + byte(aeadConf.Mode()), + aeadConf.ChunkSizeByte(), + } + n, err := writer.Write(prefix[1:]) + if err != nil || n < 4 { + return nil, errors.AEADError("could not write AEAD headers") + } + // Sample nonce + nonceLen := aeadConf.Mode().NonceLength() + nonce := make([]byte, nonceLen) + n, err = rand.Read(nonce) + if err != nil { + panic("Could not sample random nonce") + } + _, err = writer.Write(nonce) + if err != nil { + return nil, err + } + blockCipher := CipherFunction(config.Cipher()).new(key) + alg := AEADMode(aeadConf.Mode()).new(blockCipher) + + chunkSize := decodeAEADChunkSize(aeadConf.ChunkSizeByte()) + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: alg, + chunkSize: chunkSize, + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + }, + writer: writer}, nil +} + +// Write encrypts and writes bytes. It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. + if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size, index, total number of encrypted octets). + adata := append(aw.associatedData[:], aw.chunkIndex[:]...) + adata = append(adata, make([]byte, 8)...) + binary.BigEndian.PutUint64(adata[13:], uint64(aw.bytesProcessed)) + nonce := aw.computeNextNonce() + finalTag := aw.aead.Seal(nil, nonce, nil, adata) + _, err = aw.writer.Write(finalTag) + if err != nil { + return err + } + return aw.writer.Close() +} + +// sealChunk Encrypts and authenticates the given chunk. 
+func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { + if len(data) > aw.chunkSize { + return nil, errors.AEADError("chunk exceeds maximum length") + } + if aw.associatedData == nil { + return nil, errors.AEADError("can't seal without headers") + } + adata := append(aw.associatedData, aw.chunkIndex...) + nonce := aw.computeNextNonce() + encrypted := aw.aead.Seal(nil, nonce, data, adata) + aw.bytesProcessed += len(data) + if err := aw.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return encrypted, nil +} + +// openChunk decrypts and checks integrity of an encrypted chunk, returning +// the underlying plaintext and an error. It access peeked bytes from next +// chunk, to identify the last chunk and decrypt/validate accordingly. +func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { + tagLen := ar.aead.Overhead() + // Restore carried bytes from last call + chunkExtra := append(ar.peekedBytes, data...) + // 'chunk' contains encrypted bytes, followed by an authentication tag. + chunk := chunkExtra[:len(chunkExtra)-tagLen] + ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] + adata := append(ar.associatedData, ar.chunkIndex...) + nonce := ar.computeNextNonce() + plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + if err != nil { + return nil, err + } + ar.bytesProcessed += len(plainChunk) + if err = ar.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return plainChunk, nil +} + +// Checks the summary tag. It takes into account the total decrypted bytes into +// the associated data. It returns an error, or nil if the tag is valid. +func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { + // Associated: tag, version, cipher, aead, chunk size, index, and octets + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) + adata := append(ar.associatedData, ar.chunkIndex...) + adata = append(adata, amountBytes...) + nonce := ar.computeNextNonce() + _, err := ar.aead.Open(nil, nonce, tag, adata) + if err != nil { + return err + } + return nil +} + +// Associated data for chunks: tag, version, cipher, mode, chunk size byte +func (ae *AEADEncrypted) associatedData() []byte { + return []byte{ + 0xD4, + aeadEncryptedVersion, + byte(ae.cipher), + byte(ae.mode), + ae.chunkSizeByte} +} + +// computeNonce takes the incremental index and computes an eXclusive OR with +// the least significant 8 bytes of the receivers' initial nonce (see sec. +// 5.16.1 and 5.16.2). It returns the resulting nonce. +func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { + nonce = make([]byte, len(wo.initialNonce)) + copy(nonce, wo.initialNonce) + offset := len(wo.initialNonce) - 8 + for i := 0; i < 8; i++ { + nonce[i+offset] ^= wo.chunkIndex[i] + } + return +} + +// incrementIndex perfoms an integer increment by 1 of the integer represented by the +// slice, modifying it accordingly. 
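The per-chunk nonce is derived by XOR-ing the big-endian chunk counter into the last eight bytes of the initial nonce, as computeNextNonce above does. A standalone illustration, assuming a 16-byte initial nonce as used by EAX:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// nextNonce mirrors the derivation in computeNextNonce: the 64-bit chunk
// counter is XOR-ed into the last 8 bytes of the initial nonce.
func nextNonce(initial []byte, chunkIndex uint64) []byte {
	nonce := make([]byte, len(initial))
	copy(nonce, initial)
	var counter [8]byte
	binary.BigEndian.PutUint64(counter[:], chunkIndex)
	offset := len(nonce) - 8
	for i := 0; i < 8; i++ {
		nonce[i+offset] ^= counter[i]
	}
	return nonce
}

func main() {
	// With an all-zero 16-byte nonce, chunk 0 leaves it unchanged and
	// chunk 1 flips only the lowest bit of the final byte.
	iv := make([]byte, 16)
	fmt.Printf("%x\n", nextNonce(iv, 0))
	fmt.Printf("%x\n", nextNonce(iv, 1))
}
```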
+func (wo *aeadCrypter) incrementIndex() error { + index := wo.chunkIndex + if len(index) == 0 { + return errors.AEADError("Index has length 0") + } + for i := len(index) - 1; i >= 0; i-- { + if index[i] < 255 { + index[i]++ + return nil + } + index[i] = 0 + } + return errors.AEADError("cannot further increment index") +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go new file mode 100644 index 00000000000..c55cecd3577 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. +type Compressed struct { + Body io.Reader +} + +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression +) + +// CompressionConfig contains compressor configuration settings. +type CompressionConfig struct { + // Level is the compression level to use. It must be set to + // between -1 and 9, with -1 causing the compressor to use the + // default compression level, 0 causing the compressor to use + // no compression and 1 to 9 representing increasing (better, + // slower) compression levels. If Level is less than -1 or + // more then 9, a non-nil error will be returned during + // encryption. See the constants above for convenient common + // settings for Level. + Level int +} + +func (c *Compressed) parse(r io.Reader) error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + case 3: + c.Body = bzip2.NewReader(r) + default: + err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} + +// compressedWriterCloser represents the serialized compression stream +// header and the compressor. Its Close() method ensures that both the +// compressor and serialized stream header are closed. Its Write() +// method writes to the compressor. +type compressedWriteCloser struct { + sh io.Closer // Stream Header + c io.WriteCloser // Compressor +} + +func (cwc compressedWriteCloser) Write(p []byte) (int, error) { + return cwc.c.Write(p) +} + +func (cwc compressedWriteCloser) Close() (err error) { + err = cwc.c.Close() + if err != nil { + return err + } + + return cwc.sh.Close() +} + +// SerializeCompressed serializes a compressed data packet to w and +// returns a WriteCloser to which the literal data packets themselves +// can be written and which MUST be closed on completion. If cc is +// nil, sensible defaults will be used to configure the compression +// algorithm. 
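+// Only CompressionZIP and CompressionZLIB can be written here; BZip2 is
+// handled by the parser for incoming packets but is not supported for
+// serialization. A minimal, hypothetical usage sketch (out is an
+// io.WriteCloser supplied by the caller):
+//
+//	wc, err := SerializeCompressed(out, CompressionZLIB, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	// ... write literal data packets to wc ...
+//	err = wc.Close()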
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { + compressed, err := serializeStreamHeader(w, packetTypeCompressed) + if err != nil { + return + } + + _, err = compressed.Write([]byte{uint8(algo)}) + if err != nil { + return + } + + level := DefaultCompression + if cc != nil { + level = cc.Level + } + + var compressor io.WriteCloser + switch algo { + case CompressionZIP: + compressor, err = flate.NewWriter(compressed, level) + case CompressionZLIB: + compressor, err = zlib.NewWriterLevel(compressed, level) + default: + s := strconv.Itoa(int(algo)) + err = errors.UnsupportedError("Unsupported compression algorithm: " + s) + } + if err != nil { + return + } + + literaldata = compressedWriteCloser{compressed, compressor} + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go new file mode 100644 index 00000000000..5652dd81485 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -0,0 +1,167 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "math/big" + "time" +) + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values. +type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. + CompressionConfig *CompressionConfig + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int + // The public key algorithm to use - will always create a signing primary + // key and encryption subkey. + Algorithm PublicKeyAlgorithm + // Some known primes that are optionally prepopulated by the caller + RSAPrimes []*big.Int + // AEADConfig configures the use of the new AEAD Encrypted Data Packet, + // defined in the draft of the next version of the OpenPGP specification. + // If a non-nil AEADConfig is passed, usage of this packet is enabled. By + // default, it is disabled. 
See the documentation of AEADConfig for more + // configuration options related to AEAD. + // **Note: using this option may break compatibility with other OpenPGP + // implementations, as well as future versions of this library.** + AEADConfig *AEADConfig + // V5Keys configures version 5 key generation. If false, this package still + // supports version 5 keys, but produces version 4 keys. + V5Keys bool + // "The validity period of the key. This is the number of seconds after + // the key creation time that the key expires. If this is not present + // or has a value of zero, the key never expires. This is found only on + // a self-signature."" + // https://tools.ietf.org/html/rfc4880#section-5.2.3.6 + KeyLifetimeSecs uint32 + // "The validity period of the signature. This is the number of seconds + // after the signature creation time that the signature expires. If + // this is not present or has a value of zero, it never expires." + // https://tools.ietf.org/html/rfc4880#section-5.2.3.10 + SigLifetimeSecs uint32 + // SigningKeyId is used to specify the signing key to use (by Key ID). + // By default, the signing key is selected automatically, preferring + // signing subkeys if available. + SigningKeyId uint64 +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now() + } + return c.Time() +} + +// KeyLifetime returns the validity period of the key. +func (c *Config) KeyLifetime() uint32 { + if c == nil { + return 0 + } + return c.KeyLifetimeSecs +} + +// SigLifetime returns the validity period of the signature. +func (c *Config) SigLifetime() uint32 { + if c == nil { + return 0 + } + return c.SigLifetimeSecs +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} + +func (c *Config) RSAModulusBits() int { + if c == nil || c.RSABits == 0 { + return 2048 + } + return c.RSABits +} + +func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { + if c == nil || c.Algorithm == 0 { + return PubKeyAlgoRSA + } + return c.Algorithm +} + +func (c *Config) AEAD() *AEADConfig { + if c == nil { + return nil + } + return c.AEADConfig +} + +func (c *Config) SigningKey() uint64 { + if c == nil { + return 0 + } + return c.SigningKeyId +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 00000000000..801aec92b49 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,282 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 encoding.Field +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.MPI) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoECDH: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.OID) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + if e.KeyId != 0 && e.KeyId != priv.KeyId { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) + } + if e.Algo != priv.PubKeyAlgo { + return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. 
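+	// The decrypted value b is laid out as a one-octet symmetric cipher
+	// identifier, the session key itself, and a two-octet checksum over the
+	// key (see the assignments to e.CipherFunc and e.Key below).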
+ switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + // Supports both *rsa.PrivateKey and crypto.Decrypter + k := priv.PrivateKey.(crypto.Decrypter) + b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + case PubKeyAlgoECDH: + vsG := e.encryptedMPI1.Bytes() + m := e.encryptedMPI2.Bytes() + oid := priv.PublicKey.oid.EncodedBytes() + b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) + default: + err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + case PubKeyAlgoElGamal: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoECDH: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + if err != nil { + return err + } + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + _, err := w.Write(e.encryptedMPI1.EncodedBytes()) + return err + case PubKeyAlgoElGamal: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + case PubKeyAlgoECDH: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + default: + panic("internal error") + } +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. 
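+// The session key is wrapped as a one-octet cipher identifier, the key
+// octets, and a two-octet checksum, and is then encrypted to pub with RSA,
+// ElGamal or ECDH according to the public key algorithm. A hypothetical call
+// (sessionKey is a freshly generated key, normally of cipherFunc's key size):
+//
+//	err := SerializeEncryptedKey(w, recipientPub, CipherAES256, sessionKey, nil)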
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + cipherMPI := encoding.NewMPI(cipherText) + packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + _, err = w.Write(cipherMPI.EncodedBytes()) + return err +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { + return err + } + _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) + return err +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { + vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) + if err != nil { + return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) + } + + g := encoding.NewMPI(vsG) + m := encoding.NewOID(c) + + packetLen := 10 /* header length */ + packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(g.EncodedBytes()); err != nil { + return err + } + _, err = w.Write(m.EncodedBytes()) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go new file mode 100644 index 00000000000..4be987609be --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -0,0 +1,91 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + Format uint8 + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. +func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.Format = buf[0] + l.IsBinary = l.Format == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go new file mode 100644 index 00000000000..4f26d0a00b7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go @@ -0,0 +1,137 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. 
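+// The returned prefix is blockSize+2 bytes long. If randData is not exactly
+// blockSize bytes, NewOCFBEncrypter returns (nil, nil).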
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. On successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000000..41c35de219d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
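+// A one-pass signature packet precedes the data it signs, so a reader can
+// select the key and hash function before processing the data. IsLast
+// reports whether this is the last one-pass signature packet before the
+// signed data (the "nested" flag of RFC 4880, section 5.4).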
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go new file mode 100644 index 00000000000..e3879199e6b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. 
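+// Successive packets are returned by repeated calls to Next; io.EOF is
+// returned once the underlying reader is exhausted.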
+type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go new file mode 100644 index 00000000000..65203813778 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,523 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" + +import ( + "bytes" + "crypto/cipher" + "crypto/rsa" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. 
+func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + buf bytes.Buffer + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + bufLen := w.buf.Len() + if bufLen > 512 { + for power := uint(30); ; power-- { + l := 1 << power + if bufLen >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(w.buf.Next(l)) + if err != nil { + return + } + if m != l { + return 0, io.ErrShortWrite + } + break + } + } + } + return w.buf.Write(p) +} + +func (w *partialLengthWriter) Close() (err error) { + len := w.buf.Len() + err = serializeLength(w.w, len) + if err != nil { + return err + } + _, err = w.buf.WriteTo(w.w) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
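+// A returned length of -1 indicates either an old-format packet of
+// indeterminate length or a new-format packet using partial lengths; in the
+// partial-length case, contents transparently consumes the continuation
+// length headers (see partialLengthReader).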
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + return serializeLength(w, length) +} + +// serializeType writes an OpenPGP packet type to w. See RFC 4880, section +// 4.2. +func serializeType(w io.Writer, ptype packetType) (err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + return +} + +// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section +// 4.2.2. +func serializeLength(w io.Writer, length int) (err error) { + var buf [5]byte + var n int + + if length < 192 { + buf[0] = byte(length) + n = 1 + } else if length < 8384 { + length -= 192 + buf[0] = 192 + byte(length>>8) + buf[1] = byte(length) + n = 2 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 + packetTypeAEADEncrypted packetType = 20 +) + +// EncryptedDataPacket holds encrypted data. It is currently implemented by +// SymmetricallyEncrypted and AEADEncrypted. +type EncryptedDataPacket interface { + Decrypt(CipherFunction, []byte) (io.ReadCloser, error) +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + isSubkey := tag == packetTypePublicSubkey + p = &PublicKey{IsSubkey: isSubkey} + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + err = errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + case packetTypeAEADEncrypted: + p = new(AEADEncrypted) + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0x00 + SigTypeText = 0x01 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. 
+ PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt + PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction algorithm.CipherFunction + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + return algorithm.CipherFunction(cipher).KeySize() +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + return algorithm.CipherFunction(cipher).BlockSize() +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + return algorithm.CipherFunction(cipher).New(key) +} + +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + +// CompressionAlgo Represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. +type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) + +// AEADMode represents the different Authenticated Encryption with Associated +// Data specified for OpenPGP. +type AEADMode algorithm.AEADMode + +const ( + AEADModeEAX AEADMode = 1 + AEADModeOCB AEADMode = 2 + AEADModeExperimentalGCM AEADMode = 100 +) + +func (mode AEADMode) NonceLength() int { + return algorithm.AEADMode(mode).NonceLength() +} + +func (mode AEADMode) TagLength() int { + return algorithm.AEADMode(mode).TagLength() +} + +// new returns a fresh instance of the given mode. +func (mode AEADMode) new(block cipher.Block) cipher.AEAD { + return algorithm.AEADMode(mode).New(block) +} + +// ReasonForRevocation represents a revocation reason code as per RFC4880 +// section 5.2.3.23. 
+type ReasonForRevocation uint8 + +const ( + NoReason ReasonForRevocation = 0 + KeySuperseded ReasonForRevocation = 1 + KeyCompromised ReasonForRevocation = 2 + KeyRetired ReasonForRevocation = 3 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go new file mode 100644 index 00000000000..854f9c4bbbe --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -0,0 +1,780 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "fmt" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "golang.org/x/crypto/curve25519" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/ed25519" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or + // crypto.Signer/crypto.Decrypter (Decryptor RSA only). + PrivateKey interface{} + sha1Checksum bool + iv []byte + + // Type of encryption of the S2K packet + // Allowed values are 0 (Not encrypted), 254 (SHA1), or + // 255 (2-byte checksum) + s2kType S2KType + // Full parameters of the S2K packet + s2kParams *s2k.Params +} + +//S2KType s2k packet type +type S2KType uint8 + +const ( + // S2KNON unencrypt + S2KNON S2KType = 0 + // S2KSHA1 sha1 sum check + S2KSHA1 S2KType = 254 + // S2KCHECKSUM sum check + S2KCHECKSUM S2KType = 255 +) + +func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEdDSAPrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pub := priv.Public().(ed25519.PublicKey) + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pub) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// 
NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA, ECDSA or EdDSA. +func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. + switch pubkey := signer.Public().(type) { + case *rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) + case *ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) + case *ed25519.PublicKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, pubkey) + case ed25519.PublicKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. +func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { + pk := new(PrivateKey) + switch priv := decrypter.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + case *elgamal.PrivateKey: + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + case *ecdh.PrivateKey: + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + default: + panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") + } + pk.PrivateKey = decrypter + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + v5 := pk.PublicKey.Version == 5 + + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.s2kType = S2KType(buf[0]) + var optCount [1]byte + if v5 { + if _, err = readFull(r, optCount[:]); err != nil { + return + } + } + + switch pk.s2kType { + case S2KNON: + pk.s2k = nil + pk.Encrypted = false + case S2KSHA1, S2KCHECKSUM: + if v5 && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError("wrong s2k identifier for version 5") + } + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.s2kParams, err = s2k.ParseIntoParams(r) + if err != nil { + return + } + if pk.s2kParams.Dummy() { + return + } + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return + } + pk.Encrypted = true + if pk.s2kType == S2KSHA1 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + var privateKeyData []byte + if v5 { + var n [4]byte /* secret material four octet count */ + _, err = readFull(r, n[:]) + if err != nil { + return + } + count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) + if !pk.Encrypted { + count = count + 2 /* two octet checksum */ + } + privateKeyData = make([]byte, count) + _, err = readFull(r, privateKeyData) + if err != nil { + return + } + } else { + privateKeyData, err = ioutil.ReadAll(r) + if err != nil { + return + } + } + if !pk.Encrypted { + return 
pk.parsePrivateKey(privateKeyData) + } + + pk.encryptedData = privateKeyData + return +} + +// Dummy returns true if the private key is a dummy key. This is a GNU extension. +func (pk *PrivateKey) Dummy() bool { + return pk.s2kParams.Dummy() +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + contents := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(contents) + if err != nil { + return + } + if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { + return + } + + optional := bytes.NewBuffer(nil) + if pk.Encrypted || pk.Dummy() { + optional.Write([]byte{uint8(pk.cipher)}) + if err := pk.s2kParams.Serialize(optional); err != nil { + return err + } + if pk.Encrypted { + optional.Write(pk.iv) + } + } + if pk.Version == 5 { + contents.Write([]byte{uint8(optional.Len())}) + } + io.Copy(contents, optional) + + if !pk.Dummy() { + l := 0 + var priv []byte + if !pk.Encrypted { + buf := bytes.NewBuffer(nil) + err = pk.serializePrivateKey(buf) + if err != nil { + return err + } + l = buf.Len() + if pk.sha1Checksum { + h := sha1.New() + h.Write(buf.Bytes()) + buf.Write(h.Sum(nil)) + } else { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } + priv = buf.Bytes() + } else { + priv, l = pk.encryptedData, len(pk.encryptedData) + } + + if pk.Version == 5 { + contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) + } + contents.Write(priv) + } + + ptype := packetTypePrivateKey + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, contents.Len()) + if err != nil { + return + } + _, err = io.Copy(w, contents) + if err != nil { + return + } + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { + return err + } + _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) + return err +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()) + return err +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + keySize := ed25519.PrivateKeySize - ed25519.PublicKeySize + _, err := w.Write(encoding.NewMPI((*priv)[:keySize]).EncodedBytes()) + return err +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.D).EncodedBytes()) + return err +} + +// Decrypt decrypts an encrypted private key using a passphrase. 
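+// The passphrase is stretched with the key's S2K parameters to derive a
+// symmetric key, the private key material is decrypted in CFB mode, and the
+// trailing SHA-1 digest or two-octet checksum is verified before the key
+// parameters are parsed.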
+func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + err := pk.parsePrivateKey(data) + if _, ok := err.(errors.KeyInvalidError); ok { + return errors.KeyInvalidError("invalid key parameters") + } + if err != nil { + return err + } + + // Mark key as unencrypted + pk.s2kType = S2KNON + pk.s2k = nil + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. +func (pk *PrivateKey) Encrypt(passphrase []byte) error { + priv := bytes.NewBuffer(nil) + err := pk.serializePrivateKey(priv) + if err != nil { + return err + } + + //Default config of private key encryption + pk.cipher = CipherAES256 + s2kConfig := &s2k.Config{ + S2KMode: 3, //Iterated + S2KCount: 65536, + Hash: crypto.SHA256, + } + + pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig) + if err != nil { + return err + } + privateKeyBytes := priv.Bytes() + key := make([]byte, pk.cipher.KeySize()) + + pk.sha1Checksum = true + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return err + } + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = rand.Read(pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + + if pk.sha1Checksum { + pk.s2kType = S2KSHA1 + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) 
+ } else { + pk.s2kType = S2KCHECKSUM + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) + } + + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + pk.Encrypted = true + pk.PrivateKey = nil + return err +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + return +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + p := new(encoding.MPI) + if _, err := p.ReadFrom(buf); err != nil { + return err + } + + q := new(encoding.MPI) + if _, err := q.ReadFrom(buf); err != nil { + return err + } + + rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) + if err := rsaPriv.Validate(); err != nil { + return errors.KeyInvalidError(err.Error()) + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateDSAParameters(dsaPriv); err != nil { + return err + } + pk.PrivateKey = dsaPriv + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + priv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateElGamalParameters(priv); err != nil { + return err + } + pk.PrivateKey = priv + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := new(ecdsa.PrivateKey) + ecdsaPriv.PublicKey = *ecdsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, 
err := d.ReadFrom(buf); err != nil { + return err + } + + ecdsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + if err := validateECDSAParameters(ecdsaPriv); err != nil { + return err + } + pk.PrivateKey = ecdsaPriv + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + ecdhPriv := new(ecdh.PrivateKey) + ecdhPriv.PublicKey = *ecdhPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + ecdhPriv.D = d.Bytes() + if err := validateECDHParameters(ecdhPriv); err != nil { + return err + } + pk.PrivateKey = ecdhPriv + + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPub := pk.PublicKey.PublicKey.(*ed25519.PublicKey) + eddsaPriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + priv := d.Bytes() + copy(eddsaPriv[32-len(priv):32], priv) + copy(eddsaPriv[32:], (*eddsaPub)[:]) + if err := validateEdDSAParameters(&eddsaPriv); err != nil { + return err + } + pk.PrivateKey = &eddsaPriv + + return nil +} + +func validateECDSAParameters(priv *ecdsa.PrivateKey) error { + return validateCommonECC(priv.Curve, priv.D.Bytes(), priv.X, priv.Y) +} + +func validateECDHParameters(priv *ecdh.PrivateKey) error { + if priv.CurveType != ecc.Curve25519 { + return validateCommonECC(priv.Curve, priv.D, priv.X, priv.Y) + } + // Handle Curve25519 + Q := priv.X.Bytes()[1:] + var d [32]byte + // Copy reversed d + l := len(priv.D) + for i := 0; i < l; i++ { + d[i] = priv.D[l-i-1] + } + var expectedQ [32]byte + curve25519.ScalarBaseMult(&expectedQ, &d) + if !bytes.Equal(Q, expectedQ[:]) { + return errors.KeyInvalidError("ECDH curve25519: invalid point") + } + return nil +} + +func validateCommonECC(curve elliptic.Curve, d []byte, X, Y *big.Int) error { + // the public point should not be at infinity (0,0) + zero := new(big.Int) + if X.Cmp(zero) == 0 && Y.Cmp(zero) == 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", curve.Params().Name)) + } + // re-derive the public point Q' = (X,Y) = dG + // to compare to declared Q in public key + expectedX, expectedY := curve.ScalarBaseMult(d) + if X.Cmp(expectedX) != 0 || Y.Cmp(expectedY) != 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", curve.Params().Name)) + } + return nil +} + +func validateEdDSAParameters(priv *ed25519.PrivateKey) error { + // In EdDSA, the serialized public point is stored as part of private key (together with the seed), + // hence we can re-derive the key from the seed + seed := priv.Seed() + expectedPriv := ed25519.NewKeyFromSeed(seed) + if !bytes.Equal(*priv, expectedPriv) { + return errors.KeyInvalidError("eddsa: invalid point") + } + return nil +} + +func validateDSAParameters(priv *dsa.PrivateKey) error { + p := priv.P // group prime + q := priv.Q // subgroup order + g := priv.G // g has order q mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("dsa: invalid group") + } + // expect p > q + if p.Cmp(q) <= 0 { + return errors.KeyInvalidError("dsa: invalid group prime") + } + // q should be large enough and divide p-1 + pSub1 := new(big.Int).Sub(p, one) + if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { + 
return errors.KeyInvalidError("dsa: invalid order") + } + // confirm that g has order q mod p + if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("dsa: mismatching values") + } + + return nil +} + +func validateElGamalParameters(priv *elgamal.PrivateKey) error { + p := priv.P // group prime + g := priv.G // g has order p-1 mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // Expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + if p.BitLen() < 1024 { + return errors.KeyInvalidError("elgamal: group order too small") + } + pSub1 := new(big.Int).Sub(p, one) + if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + // Since p-1 is not prime, g might have a smaller order that divides p-1. + // We cannot confirm the exact order of g, but we make sure it is not too small. + gExpI := new(big.Int).Set(g) + i := 1 + threshold := 2 << 17 // we want order > threshold + for i < threshold { + i++ // we check every order to make sure key validation is not easily bypassed by guessing y' + gExpI.Mod(new(big.Int).Mul(gExpI, g), p) + if gExpI.Cmp(one) == 0 { + return errors.KeyInvalidError("elgamal: order too small") + } + } + // Check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("elgamal: mismatching values") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go new file mode 100644 index 00000000000..029b8f1aab2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go @@ -0,0 +1,12 @@ +package packet + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. 
+const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. +// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 00000000000..297dc40ba8c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,825 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "golang.org/x/crypto/ed25519" +) + +type kdfHashFunction byte +type kdfAlgorithm byte + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. 
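+// The algorithm-specific values are kept both in parsed form (PublicKey) and in +// their wire encoding (the encoding.Field members), so a parsed key can be +// re-serialized without re-encoding.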
+type PublicKey struct { + Version int + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey + Fingerprint []byte + KeyId uint64 + IsSubkey bool + + // RFC 4880 fields + n, e, p, q, g, y encoding.Field + + // RFC 6637 fields + // oid contains the OID byte sequence identifying the elliptic curve used + oid encoding.Field + + // kdf stores key derivation function parameters + // used for ECDH encryption. See RFC 6637, Section 9. + kdf encoding.Field +} + +// UpgradeToV5 updates the version of the key to v5, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV5() { + pk.Version = 5 + pk.setFingerprintAndKeyId() +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeForHash(io.Writer) error + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: new(encoding.MPI).SetBig(pub.N), + e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + q: new(encoding.MPI).SetBig(pub.Q), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + } + + curveInfo := ecc.FindByCurve(pub.Curve) + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + var pk *PublicKey + var curveInfo *ecc.CurveInfo + var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) + if pub.CurveType == ecc.Curve25519 { + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.X.Bytes()), + kdf: kdf, + } + curveInfo = ecc.FindByName("Curve25519") + } else { + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + kdf: kdf, + } + curveInfo = ecc.FindByCurve(pub.Curve) + } + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewEdDSAPublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + curveInfo := ecc.FindByName("Ed25519") + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEdDSA, + PublicKey: pub, + oid: curveInfo.Oid, + // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B + p: encoding.NewMPI(append([]byte{0x40}, *pub...)), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 { + return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) + } + + pk.Version = int(buf[0]) + if pk.Version == 5 { + var n [4]byte + _, err = readFull(r, n[:]) + if err != nil { + return + } + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + err = pk.parseECDSA(r) + case PubKeyAlgoECDH: + err = pk.parseECDH(r) + case PubKeyAlgoEdDSA: + err = pk.parseEdDSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerprintAndKeyId() + return +} + +func (pk *PublicKey) setFingerprintAndKeyId() { + // RFC 4880, section 12.2 + if pk.Version == 5 { + fingerprint := sha256.New() + pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 32) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) + } else { + fingerprint := sha1.New() + 
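+ // v4 keys use a SHA-1 fingerprint; the key ID is its final eight octets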
pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 20) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + } +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n = new(encoding.MPI) + if _, err = pk.n.ReadFrom(r); err != nil { + return + } + pk.e = new(encoding.MPI) + if _, err = pk.e.ReadFrom(r); err != nil { + return + } + + if len(pk.e.Bytes()) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.Bytes()), + E: 0, + } + for i := 0; i < len(pk.e.Bytes()); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.Bytes()[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.q = new(encoding.MPI) + if _, err = pk.q.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) + dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) + dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) + dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) + elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) + elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = elgamal + return +} + +// parseECDSA parses ECDSA public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + var c elliptic.Curve + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil || curveInfo.SigAlgorithm != ecc.ECDSA { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + c = curveInfo.Curve + x, y := elliptic.Unmarshal(c, pk.p.Bytes()) + if x == nil { + return errors.UnsupportedError("failed to parse EC point") + } + pk.PublicKey = &ecdsa.PublicKey{Curve: c, X: x, Y: y} + return +} + +// parseECDH parses ECDH public key material from the given Reader. See +// RFC 6637, Section 9. 
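+// For Curve25519 the point is stored in its native encoding rather than as an +// uncompressed SEC1 point, and the trailing KDF field selects the hash and +// cipher used for ECDH encryption.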
+func (pk *PublicKey) parseECDH(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.kdf = new(encoding.OID) + if _, err = pk.kdf.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + c := curveInfo.Curve + cType := curveInfo.CurveType + + var x, y *big.Int + if cType == ecc.Curve25519 { + x = new(big.Int) + x.SetBytes(pk.p.Bytes()) + } else { + x, y = elliptic.Unmarshal(c, pk.p.Bytes()) + } + if x == nil { + return errors.UnsupportedError("failed to parse EC point") + } + + if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { + return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 { + return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved))) + } + kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) + } + kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) + } + + pk.PublicKey = &ecdh.PublicKey{ + CurveType: cType, + Curve: c, + X: x, + Y: y, + KDF: ecdh.KDF{ + Hash: kdfHash, + Cipher: kdfCipher, + }, + } + return +} + +func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil || curveInfo.SigAlgorithm != ecc.EdDSA { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + eddsa := make(ed25519.PublicKey, ed25519.PublicKeySize) + switch flag := pk.p.Bytes()[0]; flag { + case 0x04: + // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + case 0x40: + copy(eddsa[:], pk.p.Bytes()[1:]) + default: + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + } + + pk.PublicKey = &eddsa + return +} + +// SerializeForHash serializes the PublicKey to w with the special packet +// header format needed for hashing. +func (pk *PublicKey) SerializeForHash(w io.Writer) error { + pk.SerializeSignaturePrefix(w) + return pk.serializeWithoutHeaders(w) +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { + var pLength = pk.algorithmSpecificByteCount() + if pk.Version == 5 { + pLength += 10 // version, timestamp (4), algorithm, key octet count (4). 
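+ // v5 keys are hashed behind a 0x9A prefix and a four-octet length; v4 keys use 0x99 and a two-octet length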
+ w.Write([]byte{ + 0x9A, + byte(pLength >> 24), + byte(pLength >> 16), + byte(pLength >> 8), + byte(pLength), + }) + return + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + length += pk.algorithmSpecificByteCount() + if pk.Version == 5 { + length += 4 // octet key count + } + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +func (pk *PublicKey) algorithmSpecificByteCount() int { + length := 0 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += int(pk.n.EncodedLength()) + length += int(pk.e.EncodedLength()) + case PubKeyAlgoDSA: + length += int(pk.p.EncodedLength()) + length += int(pk.q.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoElGamal: + length += int(pk.p.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoECDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + case PubKeyAlgoECDH: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + length += int(pk.kdf.EncodedLength()) + case PubKeyAlgoEdDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + default: + panic("unknown public key algorithm") + } + return length +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + t := uint32(pk.CreationTime.Unix()) + if _, err = w.Write([]byte{ + byte(pk.Version), + byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), + byte(pk.PubKeyAlgo), + }); err != nil { + return + } + + if pk.Version == 5 { + n := pk.algorithmSpecificByteCount() + if _, err = w.Write([]byte{ + byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), + }); err != nil { + return + } + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + if _, err = w.Write(pk.n.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.e.EncodedBytes()) + return + case PubKeyAlgoDSA: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.q.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoElGamal: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoECDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoECDH: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.kdf.EncodedBytes()) + return + case PubKeyAlgoEdDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures 
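+// (encryption-only algorithms such as ElGamal, ECDH and RSA encrypt-only cannot).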
+func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + eddsaPublicKey := pk.PublicKey.(*ed25519.PublicKey) + + sigR := sig.EdDSASigR.Bytes() + sigS := sig.EdDSASigS.Bytes() + + eddsaSig := make([]byte, ed25519.SignatureSize) + copy(eddsaSig[32-len(sigR):32], sigR) + copy(eddsaSig[64-len(sigS):], sigS) + + if !ed25519.Verify(*eddsaPublicKey, hashBytes, eddsaSig) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + if err != nil { + return nil, err + } + + err = signed.SerializeForHash(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. 
+ if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, +// made by this public key, of signed. +func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// BitLength returns the bit length for the given public key. 
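+// For ECC algorithms this is the bit length of the encoded point or native +// blob, not the size of the underlying field.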
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.BitLength() + case PubKeyAlgoDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoElGamal: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDH: + bitLength = pk.p.BitLength() + case PubKeyAlgoEdDSA: + bitLength = pk.p.BitLength() + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired or is created in the future. +func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { + if pk.CreationTime.After(currentTime) { + return true + } + if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { + return false + } + expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go new file mode 100644 index 00000000000..b255f1f6f8f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go @@ -0,0 +1,24 @@ +package packet + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" + +const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" + +const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" + +const eddsaPkDataHex = 
"98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go new file mode 100644 index 00000000000..94ae9ad9aca --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,78 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. 
+ if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go new file mode 100644 index 00000000000..6765ea758cc --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,991 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + Version int + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + + // Metadata includes format, filename and time, and is protected by v5 + // signatures of type 0x00 or 0x01. This metadata is included into the hash + // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. + Metadata *LiteralData + + CreationTime time.Time + + RSASignature encoding.Field + DSASigR, DSASigS encoding.Field + ECDSASigR, ECDSASigS encoding.Field + EdDSASigR, EdDSASigS encoding.Field + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredAEAD []uint8 + IssuerKeyId *uint64 + IssuerFingerprint []byte + IsPrimaryId *bool + + // PolicyURI can be set to the URI of a document that describes the + // policy under which the signature was issued. See RFC 4880, section + // 5.2.3.20 for details. + PolicyURI string + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. 
+ // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *ReasonForRevocation + RevocationReasonText string + + // In a self-signature, these flags are set if there is a features subpacket + // indicating that the issuer implementation supports these features + // (section 5.2.5.25). + MDC, AEAD, V5Keys bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + sig.Version = int(buf[0]) + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + hashedSubpackets := make([]byte, hashedSubpacketsLength) + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + sig.buildHashSuffix(hashedSubpackets) + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature = new(encoding.MPI) + _, err = sig.RSASignature.ReadFrom(r) + case PubKeyAlgoDSA: + sig.DSASigR = new(encoding.MPI) + if _, err = sig.DSASigR.ReadFrom(r); err != nil { + return + } + + sig.DSASigS = new(encoding.MPI) + _, err = sig.DSASigS.ReadFrom(r) + case PubKeyAlgoECDSA: + sig.ECDSASigR = new(encoding.MPI) + if _, err = sig.ECDSASigR.ReadFrom(r); err != nil { + return + } + + sig.ECDSASigS = new(encoding.MPI) + _, err = sig.ECDSASigS.ReadFrom(r) + case PubKeyAlgoEdDSA: + sig.EdDSASigR = new(encoding.MPI) + if _, err = sig.EdDSASigR.ReadFrom(r); err != nil { + return + } + + sig.EdDSASigS = new(encoding.MPI) + if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { + return + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1.
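+// It is called once for the hashed and once for the unhashed area; a signature +// without a creation time subpacket in the hashed area is rejected.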
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprintSubpacket signatureSubpacketType = 33 + prefAeadAlgosSubpacket signatureSubpacketType = 34 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + 
sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + if sig.Version > 4 { + err = errors.StructuralError("issuer subpacket found in v5 key") + } + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = ReasonForRevocation(subpacket[0]) + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. + if !isHashed { + return + } + if len(subpacket) > 0 { + if subpacket[0]&0x01 != 0 { + sig.MDC = true + } + if subpacket[0]&0x02 != 0 { + sig.AEAD = true + } + if subpacket[0]&0x04 != 0 { + sig.V5Keys = true + } + } + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. 
+ if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + case policyUriSubpacket: + // Policy URI, section 5.2.3.20 + if !isHashed { + return + } + sig.PolicyURI = string(subpacket) + case issuerFingerprintSubpacket: + v, l := subpacket[0], len(subpacket[1:]) + if v == 5 && l != 32 || v != 5 && l != 20 { + return nil, errors.StructuralError("bad fingerprint length") + } + sig.IssuerFingerprint = make([]byte, l) + copy(sig.IssuerFingerprint, subpacket[1:]) + sig.IssuerKeyId = new(uint64) + if v == 5 { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) + } else { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) + } + case prefAeadAlgosSubpacket: + // Preferred AEAD algorithms + if !isHashed { + return + } + sig.PreferredAEAD = make([]byte, len(subpacket)) + copy(sig.PreferredAEAD, subpacket) + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 { + return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// SigExpired returns whether sig is a signature that has expired or is created +// in the future.
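+// A signature with no stored lifetime (or a zero lifetime) never expires.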
+func (sig *Signature) SigExpired(currentTime time.Time) bool { + if sig.CreationTime.After(currentTime) { + return true + } + if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { + hash, ok := s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + + hashedFields := bytes.NewBuffer([]byte{ + uint8(sig.Version), + uint8(sig.SigType), + uint8(sig.PubKeyAlgo), + uint8(hash), + uint8(len(hashedSubpackets) >> 8), + uint8(len(hashedSubpackets)), + }) + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(6 + len(hashedSubpackets)) + if sig.Version == 5 { + hashedFields.Write([]byte{0x05, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } else { + hashedFields.Write([]byte{0x04, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } + sig.HashSuffix = make([]byte, hashedFields.Len()) + copy(sig.HashSuffix, hashedFields.Bytes()) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + hashedSubpackets := make([]byte, hashedSubpacketsLen) + serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + sig.Version = priv.PublicKey.Version + sig.IssuerFingerprint = priv.PublicKey.Fingerprint + sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) + if err != nil { + return err + } + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + sig.RSASignature = encoding.NewMPI(sigdata) + } + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR = new(encoding.MPI).SetBig(r) + sig.DSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + // direct support, avoid asn1 wrapping/unwrapping + r, s, err = ecdsa.Sign(config.Random(), pk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + if err == nil { + sig.ECDSASigR = new(encoding.MPI).SetBig(r) + sig.ECDSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoEdDSA: + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, crypto.Hash(0)) + if err == nil { + sig.EdDSASigR = encoding.NewMPI(sigdata[:32]) + sig.EdDSASigS = encoding.NewMPI(sigdata[32:]) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA +// signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, +// the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, + config *Config) error { + h, err := keySignatureHash(hashKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, signingKey, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeKey computes a revocation signature of pub using priv. On success, the signature is +// stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keyRevocationHash(pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. 
+// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. +func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = int(sig.RSASignature.EncodedLength()) + case PubKeyAlgoDSA: + sigLength = int(sig.DSASigR.EncodedLength()) + sigLength += int(sig.DSASigS.EncodedLength()) + case PubKeyAlgoECDSA: + sigLength = int(sig.ECDSASigR.EncodedLength()) + sigLength += int(sig.ECDSASigS.EncodedLength()) + case PubKeyAlgoEdDSA: + sigLength = int(sig.EdDSASigR.EncodedLength()) + sigLength += int(sig.EdDSASigS.EncodedLength()) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 5 { + length -= 4 // eight-octet instead of four-octet big endian + } + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + err = sig.serializeBody(w) + if err != nil { + return err + } + return +} + +func (sig *Signature) serializeBody(w io.Writer) (err error) { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) + fields := sig.HashSuffix[:6+hashedSubpacketsLen] + _, err = w.Write(fields) + if err != nil { + return + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + _, err = w.Write(sig.RSASignature.EncodedBytes()) + case PubKeyAlgoDSA: + if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.DSASigS.EncodedBytes()) + case PubKeyAlgoECDSA: + if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.ECDSASigS.EncodedBytes()) + case PubKeyAlgoEdDSA: + if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. 
+ subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + } + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) + } + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. + + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures. + + var features = byte(0x00) + if sig.MDC { + features |= 0x01 + } + if sig.AEAD { + features |= 0x02 + } + if sig.V5Keys { + features |= 0x04 + } + + if features != 0x00 { + subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) + } + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + + if len(sig.PreferredAEAD) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) + } + + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. 
+ if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) + } + + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. + if sig.EmbeddedSignature != nil { + var buf bytes.Buffer + err = sig.EmbeddedSignature.serializeBody(&buf) + if err != nil { + return + } + subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) + } + + return +} + +// AddMetadataToHashSuffix modifies the current hash suffix to include metadata +// (format, filename, and time). Version 5 keys protect this data including it +// in the hash computation. See section 5.2.4. +func (sig *Signature) AddMetadataToHashSuffix() { + if sig == nil || sig.Version != 5 { + return + } + if sig.SigType != 0x00 && sig.SigType != 0x01 { + return + } + lit := sig.Metadata + if lit == nil { + // This will translate into six 0x00 bytes. + lit = &LiteralData{} + } + + // Extract the current byte count + n := sig.HashSuffix[len(sig.HashSuffix)-8:] + l := uint64( + uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 | + uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7])) + + suffix := bytes.NewBuffer(nil) + suffix.Write(sig.HashSuffix[:l]) + + // Add the metadata + var buf [4]byte + buf[0] = lit.Format + fileName := lit.FileName + if len(lit.FileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + suffix.Write(buf[:2]) + suffix.Write([]byte(lit.FileName)) + binary.BigEndian.PutUint32(buf[:], lit.Time) + suffix.Write(buf[:]) + + // Update the counter and restore trailing bytes + l = uint64(suffix.Len()) + suffix.Write([]byte{0x05, 0xff}) + suffix.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + sig.HashSuffix = suffix.Bytes() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 00000000000..0e9f51d51b3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,267 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + Version int + CipherFunc CipherFunction + Mode AEADMode + s2k func(out, in []byte) + aeadNonce []byte + encryptedKey []byte +} + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. 
+ var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + ske.Version = int(buf[0]) + if ske.Version != 4 && ske.Version != 5 { + return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + if ske.Version == 5 { + mode := make([]byte, 1) + if _, err := r.Read(mode); err != nil { + return errors.StructuralError("cannot read AEAD octect from packet") + } + ske.Mode = AEADMode(mode[0]) + } + + var err error + if ske.s2k, err = s2k.Parse(r); err != nil { + if _, ok := err.(errors.ErrDummyPrivateKey); ok { + return errors.UnsupportedError("missing key GNU extension in session key") + } + return err + } + + if ske.Version == 5 { + // AEAD nonce + nonce := make([]byte, ske.Mode.NonceLength()) + _, err := readFull(r, nonce) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + ske.aeadNonce = nonce + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + switch ske.Version { + case 4: + plaintextKey, cipherFunc, err := ske.decryptV4(key) + return plaintextKey, cipherFunc, err + case 5: + plaintextKey, err := ske.decryptV5(key) + return plaintextKey, CipherFunction(0), err + } + err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + return nil, CipherFunction(0), err +} + +func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) { + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError( + "unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if len(plaintextKey) != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError( + "length of decrypted key not equal to cipher keysize") + } + return plaintextKey, cipherFunc, nil +} + +func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { + blockCipher := CipherFunction(ske.CipherFunc).new(key) + aead := ske.Mode.new(blockCipher) + + adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} + plaintextKey, err := aead.Open(nil, ske.aeadNonce, ske.encryptedKey, adata) + if err != nil { + return nil, err + } + return plaintextKey, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. 
+// The packet contains a random session key, encrypted by a key derived from +// the given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on +// whether config.AEADConfig != nil. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + + err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) + if err != nil { + return + } + + key = sessionKey + return +} + +// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. +// The packet contains the given session key, encrypted by a key derived from +// the given passphrase. The session key must be passed to +// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on +// whether config.AEADConfig != nil. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + var version int + if config.AEAD() != nil { + version = 5 + } else { + version = 4 + } + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + var packetLength int + switch version { + case 4: + packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + case 5: + nonceLen := config.AEAD().Mode().NonceLength() + tagLen := config.AEAD().Mode().TagLength() + packetLength = 3 + len(s2kBytes) + nonceLen + keySize + tagLen + } + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + buf := make([]byte, 2) + // Symmetric Key Encrypted Version + buf[0] = byte(version) + // Cipher function + buf[1] = byte(cipherFunc) + + if version == 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + switch version { + case 4: + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + case 5: + blockCipher := cipherFunc.new(keyEncryptingKey) + mode := config.AEAD().Mode() + aead := mode.new(blockCipher) + // Sample nonce using random reader + nonce := make([]byte, config.AEAD().Mode().NonceLength()) + _, err = io.ReadFull(config.Random(), nonce) + if err != nil { + return + } + // Seal and write (encryptedData includes auth. tag) + adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} + encryptedData := aead.Seal(nil, nonce, sessionKey, adata) + _, err = w.Write(nonce) + if err != nil { + return + } + _, err = w.Write(encryptedData) + if err != nil { + return + } + } + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000000..8b84de177f9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + Contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.Contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted Contents of the +// packet can be read. 
An incorrect key will only be detected after trying +// to decrypt the entire data. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.Contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + + plaintext := cipher.StreamReader{S: s, R: se.Contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous Contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. 
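For reference, the tag byte declared just below works out to 0xD3 (the always-set bit 0x80, the new-format bit 0x40, and packet type 19), and the trailer that seMDCReader.Close verifies is mdcTrailerSize = 22 bytes: the tag byte, a length byte equal to sha1.Size, and the 20-byte SHA-1 digest. A tiny standalone sketch of that arithmetic:

package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	tag := byte(0x80) | 0x40 | 19 // new-format packet tag for type 19 (MDC)
	trailer := 1 + 1 + sha1.Size  // tag byte + length byte + SHA-1 digest
	fmt.Printf("tag=0x%X trailer=%d bytes\n", tag, trailer) // tag=0xD3 trailer=22 bytes
}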
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.ErrMDCMissing + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.ErrMDCMissing + } + } + + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.ErrMDCHashMismatch + } + // The hash already includes the MDC header, but we still check its value + // to confirm encryption correctness + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.ErrMDCMissing + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. +type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + Contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go new file mode 100644 index 00000000000..0f760ade2cf --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. + 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + Contents: buf.Bytes()}) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.13 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. +func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + err = sp.Serialize(&buf) + if err != nil { + return err + } + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go new file mode 100644 index 00000000000..d6bea7d4acc --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) " +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. 
+ + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>' +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. + + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) ". +func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go new file mode 100644 index 00000000000..acb7cc6e29f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -0,0 +1,528 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. 
+package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. + UnverifiedSignatures []*packet.Signature // all other unverified signature packets. + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. 
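ReadMessage, defined next, is the package's main entry point; the MessageDetails comments above imply the calling pattern sketched below: drain UnverifiedBody to EOF (which also triggers the MDC check), then inspect SignatureError. This is a minimal sketch; the keyring contents, passphrase, and message bytes are placeholders, not values taken from this change:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"log"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	var keyring openpgp.EntityList // assumed to already hold the recipient's and signer's keys

	// Placeholder ciphertext; a real caller would pass the (dearmored) OpenPGP message here.
	msg := bytes.NewReader([]byte{})

	// Optional prompt for passphrase-protected messages; ReadMessage retries it
	// until it returns an error or a passphrase that decrypts a session key.
	prompt := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		if symmetric {
			return []byte("placeholder passphrase"), nil
		}
		return nil, errors.New("no passphrase available for encrypted private keys")
	}

	md, err := openpgp.ReadMessage(msg, keyring, prompt, nil)
	if err != nil {
		log.Fatal(err)
	}

	// The plaintext must be consumed to EOF before the signature and MDC state are valid.
	plaintext, err := io.ReadAll(md.UnverifiedBody)
	if err != nil {
		log.Fatal(err)
	}
	if md.IsSigned && md.SignatureError != nil {
		log.Fatal("bad signature: ", md.SignatureError)
	}
	fmt.Printf("%s\n", plaintext)
}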
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted + var edp packet.EncryptedDataPacket + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted: + edp = p.(packet.EncryptedDataPacket) + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring, config) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. 
+FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + if errDec != nil { + continue + } + } + // Try to decrypt symmetrically encrypted + decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + // On wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc: + // only for < 5% of cases we will proceed to decrypt the data + if err == nil { + decrypted, err = edp.Decrypt(cipherFunc, key) + // TODO: ErrKeyIncorrect is no longer thrown on SEIP decryption, + // but it might still be relevant for when we implement AEAD decryption (otherwise, remove?) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) + if sensitiveParsingErr != nil { + return nil, errors.StructuralError("parsing error") + } + return mdFinal, nil +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash + var prevLast bool +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if prevLast { + return nil, errors.UnsupportedError("nested signature packets") + } + + if p.IsLast { + prevLast = true + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md.SignatureError = err + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.IsSigned && md.SignatureError == nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if hashId == crypto.MD5 { + return nil, nil, errors.UnsupportedError("insecure hash algorithm: MD5") + } + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails + config *packet.Config +} + +func (scr *signatureCheckReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) + + // Hash only if required + if scr.md.SignedBy != nil { + scr.wrappedHash.Write(buf[:n]) + } + + if sensitiveParsingError == io.EOF { + var p packet.Packet + var readError error + var sig *packet.Signature + + p, readError = scr.packets.Next() + for readError == nil { + var ok bool + if sig, ok = p.(*packet.Signature); ok { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.Metadata = scr.md.LiteralData + } + + // If signature KeyID matches + if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + now := scr.config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + signatureError = errors.ErrKeyRevoked + } + if sig.SigExpired(now) { + signatureError = errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { + signatureError = errors.ErrKeyExpired + } + } + scr.md.Signature = sig + scr.md.SignatureError = signatureError + } else { + scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) + } + } + + p, readError = scr.packets.Next() + } + + if scr.md.SignedBy != nil && scr.md.Signature == nil { + if scr.md.UnverifiedSignatures == nil { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") + } else { + scr.md.SignatureError = errors.StructuralError("No matching signature found") + } + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + var expectedHashes []crypto.Hash + return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) +} + +// CheckDetachedSignatureAndHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions were used. 
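CheckDetachedSignature above returns the signing Entity when the detached signature verifies against the supplied keyring. A minimal verification sketch, assuming an armored public key and a binary .sig file on disk (file names are placeholders); for an armored signature, CheckArmoredDetachedSignature below can be used instead:

package main

import (
	"log"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	keyFile, err := os.Open("signer-pubkey.asc") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer keyFile.Close()

	keyring, err := openpgp.ReadArmoredKeyRing(keyFile)
	if err != nil {
		log.Fatal(err)
	}

	signed, err := os.Open("document.txt") // placeholder path to the signed data
	if err != nil {
		log.Fatal(err)
	}
	defer signed.Close()

	sig, err := os.Open("document.txt.sig") // binary detached signature
	if err != nil {
		log.Fatal(err)
	}
	defer sig.Close()

	signer, err := openpgp.CheckDetachedSignature(keyring, signed, sig, nil)
	if err != nil {
		log.Fatal("verification failed: ", err)
	}
	log.Println("good signature from", signer.PrimaryIdentity().Name)
}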
+func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + expectedHashesLen := len(expectedHashes) + packets := packet.NewReader(signature) + var sig *packet.Signature + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + var ok bool + sig, ok = p.(*packet.Signature) + if !ok { + return nil, errors.StructuralError("non signature packet found") + } + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + + for i, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + break + } + if i+1 == expectedHashesLen { + return nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + } + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + err = key.PublicKey.VerifySignature(h, sig) + if err == nil { + now := config.Now() + if key.Revoked(now) || + key.Entity.Revoked(now) || // primary key is revoked (redundant if key is the primary key) + key.Entity.PrimaryIdentity().Revoked(now) { + return key.Entity, errors.ErrKeyRevoked + } + if sig.SigExpired(now) { + return key.Entity, errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) || + key.SelfSignature.SigExpired(now) { + return key.Entity, errors.ErrKeyExpired + } + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. 
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body, config) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go new file mode 100644 index 00000000000..8caed36e3e7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -0,0 +1,173 @@ +package openpgp + +const testKey1KeyId = 0xA34D7E18C20C31BB +const testKey3KeyId = 0x338934250CCC0360 +const testKeyP256KeyId = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt +const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" + +const testKeys1And2Hex = 
"988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" + +const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const unverifiedSignatureEncryptedMessageHex = 
"c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" + +const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" + +const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn 
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD +24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = 
`9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` + +const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx 
+YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 +b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi +UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M +pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM +AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz +786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd +EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB +=RZia +-----END PGP PUBLIC KEY BLOCK----- +` + +const signedMessageV3 = `-----BEGIN PGP MESSAGE----- +Comment: GPGTools - https://gpgtools.org + +owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP +q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka +uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka +DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d +iT57d/OhWwA= +=hG7R +-----END PGP MESSAGE----- +` + +// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/ +const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK-----` + +// Generated with the above private key +const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- +Version: OpenPGP.js v4.10.7 +Comment: https://openpgpjs.org + +xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf +bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G +y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv +UQdl5MlBka1QSNbMq2Bz7XwNPg4= +=6lbM +-----END PGP MESSAGE-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go new file mode 100644 index 00000000000..14f58548bd2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -0,0 +1,367 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2k implements the various OpenPGP string-to-key transforms as +// specified in RFC 4800 section 3.7.1. +package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k" + +import ( + "crypto" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// Config collects configuration parameters for s2k key-stretching +// transformations. A nil *Config is valid and results in all default +// values. Currently, Config is used only by the Serialize function in +// this package. +type Config struct { + // S2KMode is the mode of s2k function. + // It can be 0 (simple), 1(salted), 3(iterated) + // 2(reserved) 100-110(private/experimental). + S2KMode uint8 + // Hash is the default hash function to be used. If + // nil, SHA256 is used. 
+ Hash crypto.Hash + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 65536 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 16777216 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. See RFC 4880 Section 3.7.1.3. + S2KCount int +} + +// Params contains all the parameters of the s2k packet +type Params struct { + // mode is the mode of s2k function. + // It can be 0 (simple), 1(salted), 3(iterated) + // 2(reserved) 100-110(private/experimental). + mode uint8 + // hashId is the ID of the hash function used in any of the modes + hashId byte + // salt is a byte array to use as a salt in hashing process + salt []byte + // countByte is used to determine how many rounds of hashing are to + // be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3. + countByte byte +} + +func (c *Config) hash() crypto.Hash { + if c == nil || uint(c.Hash) == 0 { + return crypto.SHA256 + } + + return c.Hash +} + +// EncodedCount get encoded count +func (c *Config) EncodedCount() uint8 { + if c == nil || c.S2KCount == 0 { + return 224 // The common case. Corresponding to 16777216 + } + + i := c.S2KCount + + switch { + case i < 65536: + i = 65536 + case i > 65011712: + i = 65011712 + } + + return encodeCount(i) +} + +// encodeCount converts an iterative "count" in the range 1024 to +// 65011712, inclusive, to an encoded count. The return value is the +// octet that is actually stored in the GPG file. encodeCount panics +// if i is not in the above range (encodedCount above takes care to +// pass i in the correct range). See RFC 4880 Section 3.7.7.1. +func encodeCount(i int) uint8 { + if i < 65536 || i > 65011712 { + panic("count arg i outside the required range") + } + + for encoded := 96; encoded < 256; encoded++ { + count := decodeCount(uint8(encoded)) + if count >= i { + return uint8(encoded) + } + } + + return 255 +} + +// decodeCount returns the s2k mode 3 iterative "count" corresponding to +// the encoded octet c. +func decodeCount(c uint8) int { + return (16 + int(c&15)) << (uint32(c>>4) + 6) +} + +// Simple writes to out the result of computing the Simple S2K function (RFC +// 4880, section 3.7.1.1) using the given hash and input passphrase. +func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. 
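+//
+// A short, illustrative sketch (passphrase and salt are placeholder values;
+// count is an already-decoded iteration count, as returned by decodeCount):
+//
+//	key := make([]byte, 32)
+//	Iterated(key, sha256.New(), passphrase, salt, 65536)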
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Generate generates valid parameters from given configuration. +// It will enforce salted + hashed s2k method +func Generate(rand io.Reader, c *Config) (*Params, error) { + hashId, ok := HashToHashId(c.Hash) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + + params := &Params{ + mode: 3, // Enforce iterared + salted method + hashId: hashId, + salt: make([]byte, 8), + countByte: c.EncodedCount(), + } + + if _, err := io.ReadFull(rand, params.salt); err != nil { + return nil, err + } + + return params, nil +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. If the S2K is a special +// GNU extension that indicates that the private key is missing, then the error +// returned is errors.ErrDummyPrivateKey. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + params, err := ParseIntoParams(r) + if err != nil { + return nil, err + } + + return params.Function() +} + +// ParseIntoParams reads a binary specification for a string-to-key +// transformation from r and returns a struct describing the s2k parameters. +func ParseIntoParams(r io.Reader) (params *Params, err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + params = &Params{ + mode: buf[0], + hashId: buf[1], + } + + switch params.mode { + case 0: + return params, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return nil, err + } + + params.salt = buf[:8] + return params, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return nil, err + } + + params.salt = buf[:8] + params.countByte = buf[8] + return params, nil + case 101: + // This is a GNU extension. 
See + // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 + if _, err = io.ReadFull(r, buf[:4]); err != nil { + return nil, err + } + if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 { + return params, nil + } + return nil, errors.UnsupportedError("GNU S2K extension") + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Dummy() bool { + return params != nil && params.mode == 101 +} + +func (params *Params) Function() (f func(out, in []byte), err error) { + if params.Dummy() { + return nil, errors.ErrDummyPrivateKey("dummy key found") + } + hashObj, ok := HashIdToHash(params.hashId) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) + } + if !hashObj.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) + } + + switch params.mode { + case 0: + f := func(out, in []byte) { + Simple(out, hashObj.New(), in) + } + + return f, nil + case 1: + f := func(out, in []byte) { + Salted(out, hashObj.New(), in, params.salt) + } + + return f, nil + case 3: + f := func(out, in []byte) { + Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte)) + } + + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Serialize(w io.Writer) (err error) { + if _, err = w.Write([]byte{params.mode}); err != nil { + return + } + if _, err = w.Write([]byte{params.hashId}); err != nil { + return + } + if params.Dummy() { + _, err = w.Write(append([]byte("GNU"), 1)) + return + } + if params.mode > 0 { + if _, err = w.Write(params.salt); err != nil { + return + } + if params.mode == 3 { + _, err = w.Write([]byte{params.countByte}) + } + } + return +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + params, err := Generate(rand, c) + if err != nil { + return err + } + err = params.Serialize(w) + if err != nil { + return err + } + + f, err := params.Function() + if err != nil { + return err + } + f(key, passphrase) + return nil +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + if hash, ok := algorithm.HashById[id]; ok { + return hash.HashFunc(), true + } + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + if hash, ok := algorithm.HashById[id]; ok { + return hash.String(), true + } + return "", false +} + +// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for id, hash := range algorithm.HashById { + if hash.HashFunc() == h { + return id, true + } + } + return 0, false +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go new file mode 100644 index 00000000000..23a24f20305 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -0,0 +1,568 @@ +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return errors.InvalidArgumentError("no valid signing keys") + } + if signingKey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signingKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sigLifetimeSecs := config.SigLifetime() + sig.SigLifetimeSecs = &sigLifetimeSecs + sig.IssuerKeyId = &signingKey.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + if _, err = io.Copy(wrappedHash, message); err != nil { + return err + } + + err = sig.Sign(h, signingKey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. 
This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + + var w io.WriteCloser + if config.AEAD() != nil { + w, err = packet.SerializeAEADEncrypted(ciphertext, key, config.Cipher(), config.AEAD().Mode(), config) + if err != nil { + return + } + } else { + w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + } + + literalData := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literalData, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// EncryptText encrypts a message to a number of recipients and, optionally, +// signs it. Optional information is contained in 'hints', also encrypted, that +// aids the recipients in processing the message. The resulting WriteCloser +// must be closed after the contents of the file have been written. If config +// is nil, sensible defaults will be used. The signing is done in text mode. +func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config) +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
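+//
+// A minimal usage sketch (illustrative only; buf and recipients are
+// placeholder values, and the returned WriteCloser must be closed to flush
+// the trailing packets):
+//
+//	pt, err := Encrypt(&buf, recipients, nil, nil, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	io.WriteString(pt, "hello")
+//	pt.Close()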
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. 
(Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: sigType, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + h, wrappedHash, err := hashForSignature(hash, sigType) + if err != nil { + return nil, err + } + metadata := &packet.LiteralData{ + Format: 't', + FileName: hints.FileName, + Time: epochSeconds, + } + if hints.IsBinary { + metadata.Format = 'b' + } + return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil + } + return literalData, nil +} + +// encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + candidateAeadModes := []uint8{ + uint8(packet.AEADModeEAX), + uint8(packet.AEADModeOCB), + uint8(packet.AEADModeExperimentalGCM), + } + candidateCompression := []uint8{ + uint8(packet.CompressionNone), + uint8(packet.CompressionZIP), + uint8(packet.CompressionZLIB), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[0:1] + defaultHashes := candidateHashes[0:1] + defaultAeadModes := candidateAeadModes[0:1] + defaultCompression := candidateCompression[0:1] + + encryptKeys := make([]Key, len(to)) + // AEAD is used only if every key supports it. 
+ aeadSupported := true + + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") + } + + sig := to[i].PrimaryIdentity().SelfSignature + if sig.AEAD == false { + aeadSupported = false + } + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + preferredAeadModes := sig.PreferredAEAD + if len(preferredAeadModes) == 0 { + preferredAeadModes = defaultAeadModes + } + preferredCompression := sig.PreferredCompression + if len(preferredCompression) == 0 { + preferredCompression = defaultCompression + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + candidateAeadModes = intersectPreferences(candidateAeadModes, preferredAeadModes) + candidateCompression = intersectPreferences(candidateCompression, preferredCompression) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 || len(candidateAeadModes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + mode := packet.AEADMode(candidateAeadModes[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + var payload io.WriteCloser + if config.AEAD() != nil && aeadSupported { + payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) + if err != nil { + return + } + } else { + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, symKey, config) + if err != nil { + return + } + } + payload, err = handleCompression(payload, candidateCompression, config) + if err != nil { + return nil, err + } + + return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[0:1] + preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + if len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") + } + + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + wrappedHash hash.Hash + h hash.Hash + signer *packet.PrivateKey + sigType packet.SignatureType + config *packet.Config + metadata *packet.LiteralData // V5 signatures protect document metadata +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.wrappedHash.Write(data) + switch s.sigType { + case packet.SigTypeBinary: + return s.literalData.Write(data) + case packet.SigTypeText: + flag := 0 + return writeCanonical(s.literalData, data, &flag) + } + return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + Version: s.signer.Version, + SigType: s.sigType, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + Metadata: s.metadata, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. 
+type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { + data = compressed + confAlgo := config.Compression() + if confAlgo == packet.CompressionNone { + return + } + finalAlgo := packet.CompressionNone + // if compression specified by config available we will use it + for _, c := range candidateCompression { + if uint8(confAlgo) == c { + finalAlgo = confAlgo + break + } + } + + if finalAlgo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) + if err != nil { + return + } + } + return data, nil +} diff --git a/vendor/github.com/acomagu/bufpipe/README.md b/vendor/github.com/acomagu/bufpipe/README.md new file mode 100644 index 00000000000..19df083142d --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/README.md @@ -0,0 +1,42 @@ +# bufpipe: Buffered Pipe + +[![CircleCI](https://img.shields.io/circleci/build/github/acomagu/bufpipe.svg?style=flat-square)](https://circleci.com/gh/acomagu/bufpipe) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/acomagu/bufpipe) + +The buffered version of io.Pipe. It's safe for concurrent use. + +## How does it differ from io.Pipe? + +Writes never block because the pipe has variable-sized buffer. + +```Go +r, w := bufpipe.New(nil) +io.WriteString(w, "abc") // No blocking. +io.WriteString(w, "def") // No blocking, too. +w.Close() +io.Copy(os.Stdout, r) +// Output: abcdef +``` + +[Playground](https://play.golang.org/p/PdyBAS3pVob) + +## How does it differ from bytes.Buffer? + +Reads block if the internal buffer is empty until the writer is closed. + +```Go +r, w := bufpipe.New(nil) + +done := make(chan struct{}) +go func() { + io.Copy(os.Stdout, r) // The reads block until the writer is closed. + done <- struct{}{} +}() + +io.WriteString(w, "abc") +io.WriteString(w, "def") +w.Close() +<-done +// Output: abcdef +``` + +[Playground](https://play.golang.org/p/UppmyLeRgX6) diff --git a/vendor/github.com/acomagu/bufpipe/bufpipe.go b/vendor/github.com/acomagu/bufpipe/bufpipe.go new file mode 100644 index 00000000000..846dbcc290f --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/bufpipe.go @@ -0,0 +1,128 @@ +package bufpipe + +import ( + "bytes" + "errors" + "io" + "sync" +) + +// ErrClosedPipe is the error used for read or write operations on a closed pipe. +var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe") + +type pipe struct { + cond *sync.Cond + buf *bytes.Buffer + rerr, werr error +} + +// A PipeReader is the read half of a pipe. +type PipeReader struct { + *pipe +} + +// A PipeWriter is the write half of a pipe. +type PipeWriter struct { + *pipe +} + +// New creates a synchronous pipe using buf as its initial contents. It can be +// used to connect code expecting an io.Reader with code expecting an io.Writer. +// +// Unlike io.Pipe, writes never block because the internal buffer has variable +// size. Reads block only when the buffer is empty. +// +// It is safe to call Read and Write in parallel with each other or with Close. +// Parallel calls to Read and parallel calls to Write are also safe: the +// individual calls will be gated sequentially. 
+// +// The new pipe takes ownership of buf, and the caller should not use buf after +// this call. New is intended to prepare a PipeReader to read existing data. It +// can also be used to set the initial size of the internal buffer for writing. +// To do that, buf should have the desired capacity but a length of zero. +func New(buf []byte) (*PipeReader, *PipeWriter) { + p := &pipe{ + buf: bytes.NewBuffer(buf), + cond: sync.NewCond(new(sync.Mutex)), + } + return &PipeReader{ + pipe: p, + }, &PipeWriter{ + pipe: p, + } +} + +// Read implements the standard Read interface: it reads data from the pipe, +// reading from the internal buffer, otherwise blocking until a writer arrives +// or the write end is closed. If the write end is closed with an error, that +// error is returned as err; otherwise err is io.EOF. +func (r *PipeReader) Read(data []byte) (int, error) { + r.cond.L.Lock() + defer r.cond.L.Unlock() + +RETRY: + n, err := r.buf.Read(data) + // If not closed and no read, wait for writing. + if err == io.EOF && r.rerr == nil && n == 0 { + r.cond.Wait() + goto RETRY + } + if err == io.EOF { + return n, r.rerr + } + return n, err +} + +// Close closes the reader; subsequent writes from the write half of the pipe +// will return error ErrClosedPipe. +func (r *PipeReader) Close() error { + return r.CloseWithError(nil) +} + +// CloseWithError closes the reader; subsequent writes to the write half of the +// pipe will return the error err. +func (r *PipeReader) CloseWithError(err error) error { + r.cond.L.Lock() + defer r.cond.L.Unlock() + + if err == nil { + err = ErrClosedPipe + } + r.werr = err + return nil +} + +// Write implements the standard Write interface: it writes data to the internal +// buffer. If the read end is closed with an error, that err is returned as err; +// otherwise err is ErrClosedPipe. +func (w *PipeWriter) Write(data []byte) (int, error) { + w.cond.L.Lock() + defer w.cond.L.Unlock() + + if w.werr != nil { + return 0, w.werr + } + + n, err := w.buf.Write(data) + w.cond.Signal() + return n, err +} + +// Close closes the writer; subsequent reads from the read half of the pipe will +// return io.EOF once the internal buffer get empty. +func (w *PipeWriter) Close() error { + return w.CloseWithError(nil) +} + +// Close closes the writer; subsequent reads from the read half of the pipe will +// return err once the internal buffer get empty. +func (w *PipeWriter) CloseWithError(err error) error { + w.cond.L.Lock() + defer w.cond.L.Unlock() + + if err == nil { + err = io.EOF + } + w.rerr = err + return nil +} diff --git a/vendor/github.com/acomagu/bufpipe/doc.go b/vendor/github.com/acomagu/bufpipe/doc.go new file mode 100644 index 00000000000..16a39480017 --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/doc.go @@ -0,0 +1,2 @@ +// Package bufpipe provides a IO pipe, has variable-sized buffer. 
+package bufpipe diff --git a/vendor/github.com/bits-and-blooms/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml deleted file mode 100644 index 094aa5ce070..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -sudo: false - -branches: - except: - - release - -branches: - only: - - master - - travis - -go: - - "1.11.x" - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; - - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; - - go get github.com/mattn/goveralls - -before_script: - - make deps - -script: - - make qa - -after_failure: - - cat ./target/test/report.xml - -after_success: - - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md deleted file mode 100644 index 97e83071e41..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# bitset - -*Go language library to map between non-negative integers and boolean values* - -[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) -[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) - - -## Description - -Package bitset implements bitsets, a mapping between non-negative integers and boolean values. -It should be more efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing individual integers. - -But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used. - -Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. - -### Example use: - -```go -package main - -import ( - "fmt" - "math/rand" - - "github.com/bits-and-blooms/bitset" -) - -func main() { - fmt.Printf("Hello from BitSet!\n") - var b bitset.BitSet - // play some Go Fish - for i := 0; i < 100; i++ { - card1 := uint(rand.Intn(52)) - card2 := uint(rand.Intn(52)) - b.Set(card1) - if b.Test(card2) { - fmt.Println("Go Fish!") - } - b.Clear(card1) - } - - // Chaining - b.Set(10).Set(11) - - for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { - fmt.Println("The following bit is set:", i) - } - if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { - fmt.Println("Intersection works.") - } else { - fmt.Println("Intersection doesn't work???") - } -} -``` - -As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. 
- -Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc - -## Memory Usage - -The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). - -## Implementation Note - -Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. - -It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. - -## Installation - -```bash -go get github.com/bits-and-blooms/bitset -``` - -## Contributing - -If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") - -## Running all tests - -Before committing the code, please check if it passes tests, has adequate coverage, etc. -```bash -go test -go test -cover -``` diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml deleted file mode 100644 index f9b29591840..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Go -# Build your Go project. -# Add steps that test, save build artifacts, deploy, and more: -# https://docs.microsoft.com/azure/devops/pipelines/languages/go - -trigger: -- master - -pool: - vmImage: 'Ubuntu-16.04' - -variables: - GOBIN: '$(GOPATH)/bin' # Go binaries path - GOROOT: '/usr/local/go1.11' # Go installation path - GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path - modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code - -steps: -- script: | - mkdir -p '$(GOBIN)' - mkdir -p '$(GOPATH)/pkg' - mkdir -p '$(modulePath)' - shopt -s extglob - shopt -s dotglob - mv !(gopath) '$(modulePath)' - echo '##vso[task.prependpath]$(GOBIN)' - echo '##vso[task.prependpath]$(GOROOT)/bin' - displayName: 'Set up the Go workspace' - -- script: | - go version - go get -v -t -d ./... - if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure - fi - go build -v . - workingDirectory: '$(modulePath)' - displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go deleted file mode 100644 index d688806a54b..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/bitset.go +++ /dev/null @@ -1,952 +0,0 @@ -/* -Package bitset implements bitsets, a mapping -between non-negative integers and boolean values. It should be more -efficient than map[uint] bool. - -It provides methods for setting, clearing, flipping, and testing -individual integers. - -But it also provides set intersection, union, difference, -complement, and symmetric operations, as well as tests to -check whether any, all, or no bits are set, and querying a -bitset's current length and number of positive bits. - -BitSets are expanded to the size of the largest set bit; the -memory allocation is approximately Max bits, where Max is -the largest set bit. BitSets are never shrunk. 
On creation, -a hint can be given for the number of bits that will be used. - -Many of the methods, including Set,Clear, and Flip, return -a BitSet pointer, which allows for chaining. - -Example use: - - import "bitset" - var b BitSet - b.Set(10).Set(11) - if b.Test(1000) { - b.Clear(1000) - } - if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { - fmt.Println("Intersection works.") - } - -As an alternative to BitSets, one should check out the 'big' package, -which provides a (less set-theoretical) view of bitsets. - -*/ -package bitset - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" -) - -// the wordSize of a bit set -const wordSize = uint(64) - -// log2WordSize is lg(wordSize) -const log2WordSize = uint(6) - -// allBits has every bit set -const allBits uint64 = 0xffffffffffffffff - -// default binary BigEndian -var binaryOrder binary.ByteOrder = binary.BigEndian - -// default json encoding base64.URLEncoding -var base64Encoding = base64.URLEncoding - -// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) -func Base64StdEncoding() { base64Encoding = base64.StdEncoding } - -// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) -func LittleEndian() { binaryOrder = binary.LittleEndian } - -// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. -type BitSet struct { - length uint - set []uint64 -} - -// Error is used to distinguish errors (panics) generated in this package. -type Error string - -// safeSet will fixup b.set to be non-nil and return the field value -func (b *BitSet) safeSet() []uint64 { - if b.set == nil { - b.set = make([]uint64, wordsNeeded(0)) - } - return b.set -} - -// From is a constructor used to create a BitSet from an array of integers -func From(buf []uint64) *BitSet { - return &BitSet{uint(len(buf)) * 64, buf} -} - -// Bytes returns the bitset as array of integers -func (b *BitSet) Bytes() []uint64 { - return b.set -} - -// wordsNeeded calculates the number of words needed for i bits -func wordsNeeded(i uint) int { - if i > (Cap() - wordSize + 1) { - return int(Cap() >> log2WordSize) - } - return int((i + (wordSize - 1)) >> log2WordSize) -} - -// New creates a new BitSet with a hint that length bits will be required -func New(length uint) (bset *BitSet) { - defer func() { - if r := recover(); r != nil { - bset = &BitSet{ - 0, - make([]uint64, 0), - } - } - }() - - bset = &BitSet{ - length, - make([]uint64, wordsNeeded(length)), - } - - return bset -} - -// Cap returns the total possible capacity, or number of bits -func Cap() uint { - return ^uint(0) -} - -// Len returns the number of bits in the BitSet. -// Note the difference to method Count, see example. -func (b *BitSet) Len() uint { - return b.length -} - -// extendSetMaybe adds additional words to incorporate new bits if needed -func (b *BitSet) extendSetMaybe(i uint) { - if i >= b.length { // if we need more bits, make 'em - if i >= Cap() { - panic("You are exceeding the capacity") - } - nsize := wordsNeeded(i + 1) - if b.set == nil { - b.set = make([]uint64, nsize) - } else if cap(b.set) >= nsize { - b.set = b.set[:nsize] // fast resize - } else if len(b.set) < nsize { - newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x - copy(newset, b.set) - b.set = newset - } - b.length = i + 1 - } -} - -// Test whether bit i is set. 
-func (b *BitSet) Test(i uint) bool { - if i >= b.length { - return false - } - return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 -} - -// Set bit i to 1, the capacity of the bitset is automatically -// increased accordingly. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) Set(i uint) *BitSet { - b.extendSetMaybe(i) - b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1)) - return b -} - -// Clear bit i to 0 -func (b *BitSet) Clear(i uint) *BitSet { - if i >= b.length { - return b - } - b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1)) - return b -} - -// SetTo sets bit i to value. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) SetTo(i uint, value bool) *BitSet { - if value { - return b.Set(i) - } - return b.Clear(i) -} - -// Flip bit at i. -// If i>= Cap(), this function will panic. -// Warning: using a very large value for 'i' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) Flip(i uint) *BitSet { - if i >= b.length { - return b.Set(i) - } - b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1)) - return b -} - -// FlipRange bit in [start, end). -// If end>= Cap(), this function will panic. -// Warning: using a very large value for 'end' -// may lead to a memory shortage and a panic: the caller is responsible -// for providing sensible parameters in line with their memory capacity. -func (b *BitSet) FlipRange(start, end uint) *BitSet { - if start >= end { - return b - } - - b.extendSetMaybe(end - 1) - var startWord uint = start >> log2WordSize - var endWord uint = end >> log2WordSize - b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1))) - for i := startWord; i < endWord; i++ { - b.set[i] = ^b.set[i] - } - b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1)) - return b -} - -// Shrink shrinks BitSet so that the provided value is the last possible -// set value. It clears all bits > the provided index and reduces the size -// and length of the set. -// -// Note that the parameter value is not the new length in bits: it is the -// maximal value that can be stored in the bitset after the function call. -// The new length in bits is the parameter value + 1. Thus it is not possible -// to use this function to set the length to 0, the minimal value of the length -// after this function call is 1. -// -// A new slice is allocated to store the new bits, so you may see an increase in -// memory usage until the GC runs. Normally this should not be a problem, but if you -// have an extremely large BitSet its important to understand that the old BitSet will -// remain in memory until the GC frees it. -func (b *BitSet) Shrink(lastbitindex uint) *BitSet { - length := lastbitindex + 1 - idx := wordsNeeded(length) - if idx > len(b.set) { - return b - } - shrunk := make([]uint64, idx) - copy(shrunk, b.set[:idx]) - b.set = shrunk - b.length = length - b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1)))) - return b -} - -// Compact shrinks BitSet to so that we preserve all set bits, while minimizing -// memory usage. Compact calls Shrink. 
-func (b *BitSet) Compact() *BitSet { - idx := len(b.set) - 1 - for ; idx >= 0 && b.set[idx] == 0; idx-- { - } - newlength := uint((idx + 1) << log2WordSize) - if newlength >= b.length { - return b // nothing to do - } - if newlength > 0 { - return b.Shrink(newlength - 1) - } - // We preserve one word - return b.Shrink(63) -} - -// InsertAt takes an index which indicates where a bit should be -// inserted. Then it shifts all the bits in the set to the left by 1, starting -// from the given index position, and sets the index position to 0. -// -// Depending on the size of your BitSet, and where you are inserting the new entry, -// this method could be extremely slow and in some cases might cause the entire BitSet -// to be recopied. -func (b *BitSet) InsertAt(idx uint) *BitSet { - insertAtElement := (idx >> log2WordSize) - - // if length of set is a multiple of wordSize we need to allocate more space first - if b.isLenExactMultiple() { - b.set = append(b.set, uint64(0)) - } - - var i uint - for i = uint(len(b.set) - 1); i > insertAtElement; i-- { - // all elements above the position where we want to insert can simply by shifted - b.set[i] <<= 1 - - // we take the most significant bit of the previous element and set it as - // the least significant bit of the current element - b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63 - } - - // generate a mask to extract the data that we need to shift left - // within the element where we insert a bit - dataMask := ^(uint64(1)< 0x40000 { - buffer.WriteString("...") - break - } - buffer.WriteString(strconv.FormatInt(int64(i), 10)) - i, e = b.NextSet(i + 1) - if e { - buffer.WriteString(",") - } - } - buffer.WriteString("}") - return buffer.String() -} - -// DeleteAt deletes the bit at the given index position from -// within the bitset -// All the bits residing on the left of the deleted bit get -// shifted right by 1 -// The running time of this operation may potentially be -// relatively slow, O(length) -func (b *BitSet) DeleteAt(i uint) *BitSet { - // the index of the slice element where we'll delete a bit - deleteAtElement := i >> log2WordSize - - // generate a mask for the data that needs to be shifted right - // within that slice element that gets modified - dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1) - - // extract the data that we'll shift right from the slice element - data := b.set[deleteAtElement] & dataMask - - // set the masked area to 0 while leaving the rest as it is - b.set[deleteAtElement] &= ^dataMask - - // shift the previously extracted data to the right and then - // set it in the previously masked area - b.set[deleteAtElement] |= (data >> 1) & dataMask - - // loop over all the consecutive slice elements to copy each - // lowest bit into the highest position of the previous element, - // then shift the entire content to the right by 1 - for i := int(deleteAtElement) + 1; i < len(b.set); i++ { - b.set[i-1] |= (b.set[i] & 1) << 63 - b.set[i] >>= 1 - } - - b.length = b.length - 1 - - return b -} - -// NextSet returns the next bit set from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no set bit found) -// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...} -// -// Users concerned with performance may want to use NextSetMany to -// retrieve several values at once. 
-func (b *BitSet) NextSet(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - if w != 0 { - return i + trailingZeroes64(w), true - } - x = x + 1 - for x < len(b.set) { - if b.set[x] != 0 { - return uint(x)*wordSize + trailingZeroes64(b.set[x]), true - } - x = x + 1 - - } - return 0, false -} - -// NextSetMany returns many next bit sets from the specified index, -// including possibly the current index and up to cap(buffer). -// If the returned slice has len zero, then no more set bits were found -// -// buffer := make([]uint, 256) // this should be reused -// j := uint(0) -// j, buffer = bitmap.NextSetMany(j, buffer) -// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) { -// for k := range buffer { -// do something with buffer[k] -// } -// j += 1 -// } -// -// -// It is possible to retrieve all set bits as follow: -// -// indices := make([]uint, bitmap.Count()) -// bitmap.NextSetMany(0, indices) -// -// However if bitmap.Count() is large, it might be preferable to -// use several calls to NextSetMany, for performance reasons. -func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) { - myanswer := buffer - capacity := cap(buffer) - x := int(i >> log2WordSize) - if x >= len(b.set) || capacity == 0 { - return 0, myanswer[:0] - } - skip := i & (wordSize - 1) - word := b.set[x] >> skip - myanswer = myanswer[:capacity] - size := int(0) - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + i - size++ - if size == capacity { - goto End - } - word = word ^ t - } - x++ - for idx, word := range b.set[x:] { - for word != 0 { - r := trailingZeroes64(word) - t := word & ((^word) + 1) - myanswer[size] = r + (uint(x+idx) << 6) - size++ - if size == capacity { - goto End - } - word = word ^ t - } - } -End: - if size > 0 { - return myanswer[size-1], myanswer[:size] - } - return 0, myanswer[:0] -} - -// NextClear returns the next clear bit from the specified index, -// including possibly the current index -// along with an error code (true = valid, false = no bit found i.e. all bits are set) -func (b *BitSet) NextClear(i uint) (uint, bool) { - x := int(i >> log2WordSize) - if x >= len(b.set) { - return 0, false - } - w := b.set[x] - w = w >> (i & (wordSize - 1)) - wA := allBits >> (i & (wordSize - 1)) - index := i + trailingZeroes64(^w) - if w != wA && index < b.length { - return index, true - } - x++ - for x < len(b.set) { - index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) - if b.set[x] != allBits && index < b.length { - return index, true - } - x++ - } - return 0, false -} - -// ClearAll clears the entire BitSet -func (b *BitSet) ClearAll() *BitSet { - if b != nil && b.set != nil { - for i := range b.set { - b.set[i] = 0 - } - } - return b -} - -// wordCount returns the number of words used in a bit set -func (b *BitSet) wordCount() int { - return len(b.set) -} - -// Clone this BitSet -func (b *BitSet) Clone() *BitSet { - c := New(b.length) - if b.set != nil { // Clone should not modify current object - copy(c.set, b.set) - } - return c -} - -// Copy into a destination BitSet -// Returning the size of the destination BitSet -// like array copy -func (b *BitSet) Copy(c *BitSet) (count uint) { - if c == nil { - return - } - if b.set != nil { // Copy should not modify current object - copy(c.set, b.set) - } - count = c.length - if b.length < c.length { - count = b.length - } - return -} - -// Count (number of set bits). 
-// Also known as "popcount" or "population count". -func (b *BitSet) Count() uint { - if b != nil && b.set != nil { - return uint(popcntSlice(b.set)) - } - return 0 -} - -// Equal tests the equivalence of two BitSets. -// False if they are of different sizes, otherwise true -// only if all the same bits are set -func (b *BitSet) Equal(c *BitSet) bool { - if c == nil || b == nil { - return c == b - } - if b.length != c.length { - return false - } - if b.length == 0 { // if they have both length == 0, then could have nil set - return true - } - // testing for equality shoud not transform the bitset (no call to safeSet) - - for p, v := range b.set { - if c.set[p] != v { - return false - } - } - return true -} - -func panicIfNull(b *BitSet) { - if b == nil { - panic(Error("BitSet must not be null")) - } -} - -// Difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - result = b.Clone() // clone b (in case b is bigger than compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - result.set[i] = b.set[i] &^ compare.set[i] - } - return -} - -// DifferenceCardinality computes the cardinality of the differnce -func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - cnt := uint64(0) - cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) - cnt += popcntSlice(b.set[l:]) - return uint(cnt) -} - -// InPlaceDifference computes the difference of base set and other set -// This is the BitSet equivalent of &^ (and not) -func (b *BitSet) InPlaceDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &^= compare.set[i] - } -} - -// Convenience function: return two bitsets ordered by -// increasing length. Note: neither can be nil -func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) { - if a.length <= b.length { - ap, bp = a, b - } else { - ap, bp = b, a - } - return -} - -// Intersection of base set and other set -// This is the BitSet equivalent of & (and) -func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = New(b.length) - for i, word := range b.set { - result.set[i] = word & compare.set[i] - } - return -} - -// IntersectionCardinality computes the cardinality of the union -func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntAndSlice(b.set, compare.set) - return uint(cnt) -} - -// InPlaceIntersection destructively computes the intersection of -// base set and the compare set. 
-// This is the BitSet equivalent of & (and) -func (b *BitSet) InPlaceIntersection(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &= compare.set[i] - } - for i := l; i < len(b.set); i++ { - b.set[i] = 0 - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } -} - -// Union of base set and other set -// This is the BitSet equivalent of | (or) -func (b *BitSet) Union(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word | compare.set[i] - } - return -} - -// UnionCardinality computes the cardinality of the uniton of the base set -// and the compare set. -func (b *BitSet) UnionCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntOrSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceUnion creates the destructive union of base set and compare set. -// This is the BitSet equivalent of | (or). -func (b *BitSet) InPlaceUnion(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] |= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - // compare is bigger, so clone it - result = compare.Clone() - for i, word := range b.set { - result.set[i] = word ^ compare.set[i] - } - return -} - -// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference -func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { - panicIfNull(b) - panicIfNull(compare) - b, compare = sortByLength(b, compare) - cnt := popcntXorSlice(b.set, compare.set) - if len(compare.set) > len(b.set) { - cnt += popcntSlice(compare.set[len(b.set):]) - } - return uint(cnt) -} - -// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set -// This is the BitSet equivalent of ^ (xor) -func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { - panicIfNull(b) - panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - if compare.length > 0 { - b.extendSetMaybe(compare.length - 1) - } - for i := 0; i < l; i++ { - b.set[i] ^= compare.set[i] - } - if len(compare.set) > l { - for i := l; i < len(compare.set); i++ { - b.set[i] = compare.set[i] - } - } -} - -// Is the length an exact multiple of word sizes? 
-func (b *BitSet) isLenExactMultiple() bool { - return b.length%wordSize == 0 -} - -// Clean last word by setting unused bits to 0 -func (b *BitSet) cleanLastWord() { - if !b.isLenExactMultiple() { - b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize) - } -} - -// Complement computes the (local) complement of a biset (up to length bits) -func (b *BitSet) Complement() (result *BitSet) { - panicIfNull(b) - result = New(b.length) - for i, word := range b.set { - result.set[i] = ^word - } - result.cleanLastWord() - return -} - -// All returns true if all bits are set, false otherwise. Returns true for -// empty sets. -func (b *BitSet) All() bool { - panicIfNull(b) - return b.Count() == b.length -} - -// None returns true if no bit is set, false otherwise. Returns true for -// empty sets. -func (b *BitSet) None() bool { - panicIfNull(b) - if b != nil && b.set != nil { - for _, word := range b.set { - if word > 0 { - return false - } - } - return true - } - return true -} - -// Any returns true if any bit is set, false otherwise -func (b *BitSet) Any() bool { - panicIfNull(b) - return !b.None() -} - -// IsSuperSet returns true if this is a superset of the other set -func (b *BitSet) IsSuperSet(other *BitSet) bool { - for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { - if !b.Test(i) { - return false - } - } - return true -} - -// IsStrictSuperSet returns true if this is a strict superset of the other set -func (b *BitSet) IsStrictSuperSet(other *BitSet) bool { - return b.Count() > other.Count() && b.IsSuperSet(other) -} - -// DumpAsBits dumps a bit set as a string of bits -func (b *BitSet) DumpAsBits() string { - if b.set == nil { - return "." - } - buffer := bytes.NewBufferString("") - i := len(b.set) - 1 - for ; i >= 0; i-- { - fmt.Fprintf(buffer, "%064b.", b.set[i]) - } - return buffer.String() -} - -// BinaryStorageSize returns the binary storage requirements -func (b *BitSet) BinaryStorageSize() int { - return binary.Size(uint64(0)) + binary.Size(b.set) -} - -// WriteTo writes a BitSet to a stream -func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { - length := uint64(b.length) - - // Write length - err := binary.Write(stream, binaryOrder, length) - if err != nil { - return 0, err - } - - // Write set - err = binary.Write(stream, binaryOrder, b.set) - return int64(b.BinaryStorageSize()), err -} - -// ReadFrom reads a BitSet from a stream written using WriteTo -func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { - var length uint64 - - // Read length first - err := binary.Read(stream, binaryOrder, &length) - if err != nil { - return 0, err - } - newset := New(uint(length)) - - if uint64(newset.length) != length { - return 0, errors.New("unmarshalling error: type mismatch") - } - - // Read remaining bytes as set - err = binary.Read(stream, binaryOrder, newset.set) - if err != nil { - return 0, err - } - - *b = *newset - return int64(b.BinaryStorageSize()), nil -} - -// MarshalBinary encodes a BitSet into a binary form and returns the result. -func (b *BitSet) MarshalBinary() ([]byte, error) { - var buf bytes.Buffer - writer := bufio.NewWriter(&buf) - - _, err := b.WriteTo(writer) - if err != nil { - return []byte{}, err - } - - err = writer.Flush() - - return buf.Bytes(), err -} - -// UnmarshalBinary decodes the binary form generated by MarshalBinary. 
-func (b *BitSet) UnmarshalBinary(data []byte) error { - buf := bytes.NewReader(data) - reader := bufio.NewReader(buf) - - _, err := b.ReadFrom(reader) - - return err -} - -// MarshalJSON marshals a BitSet as a JSON structure -func (b *BitSet) MarshalJSON() ([]byte, error) { - buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) - _, err := b.WriteTo(buffer) - if err != nil { - return nil, err - } - - // URLEncode all bytes - return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes())) -} - -// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON -func (b *BitSet) UnmarshalJSON(data []byte) error { - // Unmarshal as string - var s string - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - // URLDecode string - buf, err := base64Encoding.DecodeString(s) - if err != nil { - return err - } - - _, err = b.ReadFrom(bytes.NewReader(buf)) - return err -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go deleted file mode 100644 index 76577a83828..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt.go +++ /dev/null @@ -1,53 +0,0 @@ -package bitset - -// bit population count, take from -// https://code.google.com/p/go/issues/detail?id=4988#c11 -// credit: https://code.google.com/u/arnehormann/ -func popcount(x uint64) (n uint64) { - x -= (x >> 1) & 0x5555555555555555 - x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 - x += x >> 4 - x &= 0x0f0f0f0f0f0f0f0f - x *= 0x0101010101010101 - return x >> 56 -} - -func popcntSliceGo(s []uint64) uint64 { - cnt := uint64(0) - for _, x := range s { - cnt += popcount(x) - } - return cnt -} - -func popcntMaskSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] &^ m[i]) - } - return cnt -} - -func popcntAndSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] & m[i]) - } - return cnt -} - -func popcntOrSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] | m[i]) - } - return cnt -} - -func popcntXorSliceGo(s, m []uint64) uint64 { - cnt := uint64(0) - for i := range s { - cnt += popcount(s[i] ^ m[i]) - } - return cnt -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go deleted file mode 100644 index fc8ff4f367c..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func popcntSlice(s []uint64) uint64 { - var cnt int - for _, x := range s { - cnt += bits.OnesCount64(x) - } - return uint64(cnt) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] &^ m[i]) - } - return uint64(cnt) -} - -func popcntAndSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] & m[i]) - } - return uint64(cnt) -} - -func popcntOrSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] | m[i]) - } - return uint64(cnt) -} - -func popcntXorSlice(s, m []uint64) uint64 { - var cnt int - for i := range s { - cnt += bits.OnesCount64(s[i] ^ m[i]) - } - return uint64(cnt) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go deleted file mode 100644 index 4cf64f24ad0..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go 
+++ /dev/null @@ -1,68 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -package bitset - -// *** the following functions are defined in popcnt_amd64.s - -//go:noescape - -func hasAsm() bool - -// useAsm is a flag used to select the GO or ASM implementation of the popcnt function -var useAsm = hasAsm() - -//go:noescape - -func popcntSliceAsm(s []uint64) uint64 - -//go:noescape - -func popcntMaskSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntAndSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntOrSliceAsm(s, m []uint64) uint64 - -//go:noescape - -func popcntXorSliceAsm(s, m []uint64) uint64 - -func popcntSlice(s []uint64) uint64 { - if useAsm { - return popcntSliceAsm(s) - } - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - if useAsm { - return popcntMaskSliceAsm(s, m) - } - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - if useAsm { - return popcntAndSliceAsm(s, m) - } - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - if useAsm { - return popcntOrSliceAsm(s, m) - } - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - if useAsm { - return popcntXorSliceAsm(s, m) - } - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s deleted file mode 100644 index 666c0dcc17f..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s +++ /dev/null @@ -1,104 +0,0 @@ -// +build !go1.9 -// +build amd64,!appengine - -TEXT ·hasAsm(SB),4,$0-1 -MOVQ $1, AX -CPUID -SHRQ $23, CX -ANDQ $1, CX -MOVB CX, ret+0(FP) -RET - -#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 - -TEXT ·popcntSliceAsm(SB),4,$0-32 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntSliceEnd -popcntSliceLoop: -BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX -ADDQ DX, AX -ADDQ $8, SI -LOOP popcntSliceLoop -popcntSliceEnd: -MOVQ AX, ret+24(FP) -RET - -TEXT ·popcntMaskSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntMaskSliceEnd -MOVQ m+24(FP), DI -popcntMaskSliceLoop: -MOVQ (DI), DX -NOTQ DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntMaskSliceLoop -popcntMaskSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntAndSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntAndSliceEnd -MOVQ m+24(FP), DI -popcntAndSliceLoop: -MOVQ (DI), DX -ANDQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntAndSliceLoop -popcntAndSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntOrSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntOrSliceEnd -MOVQ m+24(FP), DI -popcntOrSliceLoop: -MOVQ (DI), DX -ORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntOrSliceLoop -popcntOrSliceEnd: -MOVQ AX, ret+48(FP) -RET - -TEXT ·popcntXorSliceAsm(SB),4,$0-56 -XORQ AX, AX -MOVQ s+0(FP), SI -MOVQ s_len+8(FP), CX -TESTQ CX, CX -JZ popcntXorSliceEnd -MOVQ m+24(FP), DI -popcntXorSliceLoop: -MOVQ (DI), DX -XORQ (SI), DX -POPCNTQ_DX_DX -ADDQ DX, AX -ADDQ $8, SI -ADDQ $8, DI -LOOP popcntXorSliceLoop -popcntXorSliceEnd: -MOVQ AX, ret+48(FP) -RET diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go deleted file mode 100644 
index 21e0ff7b4fc..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !go1.9 -// +build !amd64 appengine - -package bitset - -func popcntSlice(s []uint64) uint64 { - return popcntSliceGo(s) -} - -func popcntMaskSlice(s, m []uint64) uint64 { - return popcntMaskSliceGo(s, m) -} - -func popcntAndSlice(s, m []uint64) uint64 { - return popcntAndSliceGo(s, m) -} - -func popcntOrSlice(s, m []uint64) uint64 { - return popcntOrSliceGo(s, m) -} - -func popcntXorSlice(s, m []uint64) uint64 { - return popcntXorSliceGo(s, m) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go deleted file mode 100644 index c52b61be9fc..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package bitset - -var deBruijn = [...]byte{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} - -func trailingZeroes64(v uint64) uint { - return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) -} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go deleted file mode 100644 index 36a988e714d..00000000000 --- a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.9 - -package bitset - -import "math/bits" - -func trailingZeroes64(v uint64) uint { - return uint(bits.TrailingZeros64(v)) -} diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore index 6c7385fa2f8..1b87ff10e6b 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/.gitignore @@ -1,5 +1,6 @@ test/test +test/test.coverage test/piggie/piggie -test/phaul +test/phaul/phaul +test/phaul/phaul.coverage image -stats/stats.proto diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile b/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile index a5c5f5542b8..67c43a05b32 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/Makefile @@ -1,5 +1,12 @@ +SHELL = /bin/bash GO ?= go CC ?= gcc +COVERAGE_PATH ?= $(shell pwd)/.coverage +CRIU_FEATURE_MEM_TRACK = $(shell if criu check --feature mem_dirty_track > /dev/null; then echo 1; else echo 0; fi) +CRIU_FEATURE_LAZY_PAGES = $(shell if criu check --feature uffd-noncoop > /dev/null; then echo 1; else echo 0; fi) +CRIU_FEATURE_PIDFD_STORE = $(shell if criu check --feature pidfd_store > /dev/null; then echo 1; else echo 0; fi) + +export CRIU_FEATURE_MEM_TRACK CRIU_FEATURE_LAZY_PAGES CRIU_FEATURE_PIDFD_STORE all: build test phaul-test @@ -9,13 +16,15 @@ lint: build: $(GO) build -v ./... 
-TEST_BINARIES := test/test test/piggie/piggie test/phaul/phaul +TEST_PAYLOAD := test/piggie/piggie +TEST_BINARIES := test/test $(TEST_PAYLOAD) test/phaul/phaul +COVERAGE_BINARIES := test/test.coverage test/phaul/phaul.coverage test-bin: $(TEST_BINARIES) test/piggie/piggie: test/piggie/piggie.c $(CC) $^ -o $@ -test/test: test/*.go +test/test: test/main.go $(GO) build -v -o $@ $^ test: $(TEST_BINARIES) @@ -27,7 +36,7 @@ test: $(TEST_BINARIES) } rm -rf image -test/phaul/phaul: test/phaul/*.go +test/phaul/phaul: test/phaul/main.go $(GO) build -v -o $@ $^ phaul-test: $(TEST_BINARIES) @@ -37,10 +46,41 @@ phaul-test: $(TEST_BINARIES) pkill -9 piggie; \ } +test/test.coverage: test/*.go + $(GO) test \ + -covermode=count \ + -coverpkg=./... \ + -mod=vendor \ + -tags coverage \ + -buildmode=pie -c -o $@ $^ + +test/phaul/phaul.coverage: test/phaul/*.go + $(GO) test \ + -covermode=count \ + -coverpkg=./... \ + -mod=vendor \ + -tags coverage \ + -buildmode=pie -c -o $@ $^ + +coverage: $(COVERAGE_BINARIES) $(TEST_PAYLOAD) + mkdir -p $(COVERAGE_PATH) + mkdir -p image + PID=$$(test/piggie/piggie) && { \ + test/test.coverage -test.coverprofile=coverprofile.integration.$$RANDOM -test.outputdir=${COVERAGE_PATH} COVERAGE dump $$PID image && \ + test/test.coverage -test.coverprofile=coverprofile.integration.$$RANDOM -test.outputdir=${COVERAGE_PATH} COVERAGE restore image; \ + pkill -9 piggie; \ + } + rm -rf image + PID=$$(test/piggie/piggie) && { \ + test/phaul/phaul.coverage -test.coverprofile=coverprofile.integration.$$RANDOM -test.outputdir=${COVERAGE_PATH} COVERAGE $$PID; \ + pkill -9 piggie; \ + } + echo "mode: set" > .coverage/coverage.out && cat .coverage/coverprofile* | \ + grep -v mode: | sort -r | awk '{if($$1 != last) {print $$0;last=$$1}}' >> .coverage/coverage.out + clean: - @rm -f $(TEST_BINARIES) - @rm -rf image - @rm -f rpc/rpc.proto stats/stats.proto + @rm -f $(TEST_BINARIES) $(COVERAGE_BINARIES) codecov + @rm -rf image $(COVERAGE_PATH) rpc/rpc.proto: curl -sSL https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/rpc.proto -o $@ @@ -49,14 +89,19 @@ stats/stats.proto: curl -sSL https://raw.githubusercontent.com/checkpoint-restore/criu/master/images/stats.proto -o $@ rpc/rpc.pb.go: rpc/rpc.proto - protoc --go_out=. --go_opt=Mrpc/rpc.proto=rpc/ $^ + protoc --go_out=. --go_opt=M$^=rpc/ $^ stats/stats.pb.go: stats/stats.proto - protoc --go_out=. $^ + protoc --go_out=. 
--go_opt=M$^=stats/ $^ vendor: GO111MODULE=on $(GO) mod tidy GO111MODULE=on $(GO) mod vendor GO111MODULE=on $(GO) mod verify -.PHONY: build test phaul-test test-bin clean lint vendor +codecov: + curl -Os https://uploader.codecov.io/latest/linux/codecov + chmod +x codecov + ./codecov -f '.coverage/coverage.out' + +.PHONY: build test phaul-test test-bin clean lint vendor coverage codecov diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/README.md b/vendor/github.com/checkpoint-restore/go-criu/v5/README.md index 390da3e98b0..a7483321b5c 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v5/README.md +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/README.md @@ -16,7 +16,7 @@ The following example would print the version of CRIU: import ( "log" - "github.com/checkpoint/restore/go-criu/v5" + "github.com/checkpoint-restore/go-criu/v5" ) func main() { @@ -50,6 +50,7 @@ The following table shows the relation between go-criu and criu versions: | Major version | Latest release | CRIU version | | -------------- | -------------- | ------------ | +| v5             | 5.2.0         | 3.16         | | v5             | 5.0.0         | 3.15         | | v4             | 4.1.0         | 3.14         | diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/features.go b/vendor/github.com/checkpoint-restore/go-criu/v5/features.go new file mode 100644 index 00000000000..c7127f95157 --- /dev/null +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/features.go @@ -0,0 +1,45 @@ +package criu + +import ( + "fmt" + + "github.com/checkpoint-restore/go-criu/v5/rpc" +) + +// Feature checking in go-criu is based on the libcriu feature checking function. + +// Feature checking allows the user to check if CRIU supports +// certain features. There are CRIU features which do not depend +// on the version of CRIU but on kernel features or architecture. +// +// One example is memory tracking. Memory tracking can be disabled +// in the kernel or there are architectures which do not support +// it (aarch64 for example). By using the feature check a libcriu +// user can easily query CRIU if a certain feature is available. +// +// The features which should be checked can be marked in the +// structure 'struct criu_feature_check'. Each structure member +// that is set to true will result in CRIU checking for the +// availability of that feature in the current combination of +// CRIU/kernel/architecture. +// +// Available features will be set to true when the function +// returns successfully. Missing features will be set to false. 
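(Caller-side sketch, not part of the patch: a minimal use of the FeatureCheck API defined below. It assumes the `MakeCriu` constructor from the package root and the `MemTrack`/`LazyPages` fields generated from CRIU's rpc.proto; adjust to the fields you actually need.)

```Go
package main

import (
	"fmt"
	"log"

	criu "github.com/checkpoint-restore/go-criu/v5"
	"github.com/checkpoint-restore/go-criu/v5/rpc"
	"google.golang.org/protobuf/proto"
)

func main() {
	c := criu.MakeCriu()

	// Mark the features we care about; CRIU sets each requested field to
	// true or false depending on kernel/architecture support.
	feat, err := c.FeatureCheck(&rpc.CriuFeatures{
		MemTrack:  proto.Bool(true),
		LazyPages: proto.Bool(true),
	})
	if err != nil {
		log.Fatalf("feature check failed: %v", err)
	}

	fmt.Println("dirty memory tracking:", feat.GetMemTrack())
	fmt.Println("lazy pages (uffd-noncoop):", feat.GetLazyPages())
}
```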
+ +func (c *Criu) FeatureCheck(features *rpc.CriuFeatures) (*rpc.CriuFeatures, error) { + resp, err := c.doSwrkWithResp( + rpc.CriuReqType_FEATURE_CHECK, + nil, + nil, + features, + ) + if err != nil { + return nil, err + } + + if resp.GetType() != rpc.CriuReqType_FEATURE_CHECK { + return nil, fmt.Errorf("Unexpected CRIU RPC response") + } + + return features, nil +} diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/main.go b/vendor/github.com/checkpoint-restore/go-criu/v5/main.go index 78811c309ce..88b1b24587a 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v5/main.go +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/main.go @@ -87,19 +87,19 @@ func (c *Criu) sendAndRecv(reqB []byte) ([]byte, int, error) { } func (c *Criu) doSwrk(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) error { - resp, err := c.doSwrkWithResp(reqType, opts, nfy) + resp, err := c.doSwrkWithResp(reqType, opts, nfy, nil) if err != nil { return err } respType := resp.GetType() if respType != reqType { - return errors.New("unexpected responce") + return errors.New("unexpected CRIU RPC response") } return nil } -func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify) (*rpc.CriuResp, error) { +func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy Notify, features *rpc.CriuFeatures) (*rpc.CriuResp, error) { var resp *rpc.CriuResp req := rpc.CriuReq{ @@ -111,6 +111,10 @@ func (c *Criu) doSwrkWithResp(reqType rpc.CriuReqType, opts *rpc.CriuOpts, nfy N opts.NotifyScripts = proto.Bool(true) } + if features != nil { + req.Features = features + } + if c.swrkCmd == nil { err := c.Prepare() if err != nil { @@ -209,7 +213,7 @@ func (c *Criu) StartPageServer(opts *rpc.CriuOpts) error { // StartPageServerChld starts the page server and returns PID and port func (c *Criu) StartPageServerChld(opts *rpc.CriuOpts) (int, int, error) { - resp, err := c.doSwrkWithResp(rpc.CriuReqType_PAGE_SERVER_CHLD, opts, nil) + resp, err := c.doSwrkWithResp(rpc.CriuReqType_PAGE_SERVER_CHLD, opts, nil, nil) if err != nil { return 0, 0, err } @@ -220,7 +224,7 @@ func (c *Criu) StartPageServerChld(opts *rpc.CriuOpts) (int, int, error) { // GetCriuVersion executes the VERSION RPC call and returns the version // as an integer. Major * 10000 + Minor * 100 + SubLevel func (c *Criu) GetCriuVersion() (int, error) { - resp, err := c.doSwrkWithResp(rpc.CriuReqType_VERSION, nil, nil) + resp, err := c.doSwrkWithResp(rpc.CriuReqType_VERSION, nil, nil, nil) if err != nil { return 0, err } diff --git a/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go b/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go index 9f22f15393d..15e33fea52f 100644 --- a/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go +++ b/vendor/github.com/checkpoint-restore/go-criu/v5/rpc/rpc.pb.go @@ -2,8 +2,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.14.0 +// protoc-gen-go v1.27.1 +// protoc v3.12.4 // source: rpc/rpc.proto package rpc diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore index 38b15653c0f..b46162b8ec3 100644 --- a/vendor/github.com/cilium/ebpf/.gitignore +++ b/vendor/github.com/cilium/ebpf/.gitignore @@ -5,6 +5,7 @@ *.so *.dylib *.o +!*_bpf*.o # Test binary, build with `go test -c` *.test diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml index a88374197ec..dc62dd6d0fc 100644 --- a/vendor/github.com/cilium/ebpf/.golangci.yaml +++ b/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -24,6 +24,5 @@ linters: # Could be enabled later: # - gocyclo - # - prealloc # - maligned # - gosec diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md index aee9c0a0d4d..6cbb31b6481 100644 --- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md +++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md @@ -57,7 +57,7 @@ Objects loading a spec will fail because the kernel is too old, or a feature is not enabled. There are multiple ways the library deals with that: -* Fallback: older kernels don't allowing naming programs and maps. The library +* Fallback: older kernels don't allow naming programs and maps. The library automatically detects support for names, and omits them during load if necessary. This works since name is primarily a debug aid. @@ -68,7 +68,7 @@ enabled. There are multiple ways the library deals with that: Once program and map objects are loaded they expose the kernel's low-level API, e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer wrappers on top of the low-level API, like `MapIterator`. The low-level API is -useful as an out when our higher-level API doesn't support a particular use case. +useful when our higher-level API doesn't support a particular use case. Links --- diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md index 72ceb437829..0d29eae81e3 100644 --- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md +++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -6,8 +6,8 @@ are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get a better understanding for the high-level goals. New features must be accompanied by tests. Before starting work on any large -feature, please [join](https://cilium.herokuapp.com/) the -[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to +feature, please [join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to discuss the design first. 
When submitting pull requests, consider writing details about what problem you diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile index 5dd342c5b28..0bc15c0810c 100644 --- a/vendor/github.com/cilium/ebpf/Makefile +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -18,11 +18,14 @@ TARGETS := \ testdata/loader-clang-7 \ testdata/loader-clang-9 \ testdata/loader-$(CLANG) \ + testdata/btf_map_init \ testdata/invalid_map \ testdata/raw_tracepoint \ testdata/invalid_map_static \ - testdata/initialized_btf_map \ + testdata/invalid_btf_map_init \ testdata/strings \ + testdata/freplace \ + testdata/iproute2_map_compat \ internal/btf/testdata/relocs .PHONY: all clean docker-all docker-shell diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md index 76c3c303bb1..01e2fff92bb 100644 --- a/vendor/github.com/cilium/ebpf/README.md +++ b/vendor/github.com/cilium/ebpf/README.md @@ -2,28 +2,16 @@ [![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) +![HoneyGopher](.github/images/cilium-ebpf.png) + eBPF is a pure Go library that provides utilities for loading, compiling, and debugging eBPF programs. It has minimal external dependencies and is intended to be used in long running processes. -* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic - assembler -* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF - to various hooks -* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a - `PERF_EVENT_ARRAY` -* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows - compiling and embedding eBPF programs in Go code - The library is maintained by [Cloudflare](https://www.cloudflare.com) and -[Cilium](https://www.cilium.io). Feel free to -[join](https://cilium.herokuapp.com/) the -[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack. - -## Current status +[Cilium](https://www.cilium.io). -The package is production ready, but **the API is explicitly unstable right -now**. Expect to update your code if you want to follow along. +See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem. ## Getting Started @@ -33,21 +21,37 @@ your own tools can be found under [examples/](examples/). Contributions are highly encouraged, as they highlight certain use cases of eBPF and the library, and help shape the future of the project. +## Getting Help + +Please +[join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you +have questions regarding the library. + +## Packages + +This library includes the following packages: + +* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic + assembler, allowing you to write eBPF assembly instructions directly + within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) +* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows + compiling and embedding eBPF programs written in C within Go code. As well as + compiling the C code, it auto-generates Go code for loading and manipulating + the eBPF program and map objects. 
+* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF + to various hooks +* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a + `PERF_EVENT_ARRAY` +* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a + `BPF_MAP_TYPE_RINGBUF` map + + ## Requirements * A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy) -* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested) - -## Useful resources - -* [eBPF.io](https://ebpf.io) (recommended) -* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide) - (recommended) -* [Linux documentation on - BPF](https://www.kernel.org/doc/html/latest/networking/filter.html) -* [eBPF features by Linux - version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md) +* Linux >= 4.9. CI is run against LTS releases. ## Regenerating Testdata @@ -60,3 +64,7 @@ The toolchain image build files are kept in [testdata/docker/](testdata/docker/) ## License MIT + +### eBPF Gopher + +The eBPF honeygopher is based on the Go gopher designed by Renee French. diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go index aee2c7ac810..bfa5d59c976 100644 --- a/vendor/github.com/cilium/ebpf/asm/func.go +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -184,6 +184,12 @@ const ( FnKtimeGetCoarseNs FnImaInodeHash FnSockFromFile + FnCheckMtu + FnForEachMapElem + FnSnprintf + FnSysBpf + FnBtfFindByNameKind + FnSysClose ) // Call emits a function call. diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go index a712c5da8af..5a0e333639a 100644 --- a/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -171,11 +171,17 @@ func _() { _ = x[FnKtimeGetCoarseNs-160] _ = x[FnImaInodeHash-161] _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] } -const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFile" +const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysClose" -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424} +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 
230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497} func (i BuiltinFunc) String() string { if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go index e7ac0109e2d..64d717d156d 100644 --- a/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -181,6 +181,11 @@ func (ins *Instruction) IsFunctionCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall } +// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. +func (ins *Instruction) IsBuiltinCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 +} + // IsConstantLoad returns true if the instruction loads a constant of the // given size. func (ins *Instruction) IsConstantLoad(size Size) bool { diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 00000000000..de355ed9092 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,65 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] +} + +const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent" + +var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610} + +func (i AttachType) String() string { + if i >= AttachType(len(_AttachType_index)-1) { + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go index 17cc69492ea..2ededc87a05 100644 --- a/vendor/github.com/cilium/ebpf/collection.go +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -1,6 +1,7 @@ package ebpf import ( + "encoding/binary" "errors" "fmt" "io" @@ -25,6 +26,10 @@ type CollectionOptions struct { type CollectionSpec struct { Maps map[string]*MapSpec Programs map[string]*ProgramSpec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. + ByteOrder binary.ByteOrder } // Copy returns a recursive copy of the spec. 
@@ -34,8 +39,9 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { } cpy := CollectionSpec{ - Maps: make(map[string]*MapSpec, len(cs.Maps)), - Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + Maps: make(map[string]*MapSpec, len(cs.Maps)), + Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + ByteOrder: cs.ByteOrder, } for name, spec := range cs.Maps { @@ -123,7 +129,7 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error buf := make([]byte, len(value)) copy(buf, value) - err := patchValue(buf, btf.MapValue(rodata.BTF), consts) + err := patchValue(buf, rodata.BTF.Value, consts) if err != nil { return err } @@ -134,15 +140,15 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error // Assign the contents of a CollectionSpec to a struct. // -// This function is a short-cut to manually checking the presence -// of maps and programs in a collection spec. Consider using bpf2go if this -// sounds useful. +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. // -// The argument to must be a pointer to a struct. A field of the +// 'to' must be a pointer to a struct. A field of the // struct is updated with values from Programs or Maps if it // has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. -// The tag gives the name of the program or map as found in -// the CollectionSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. // // struct { // Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` @@ -150,42 +156,47 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error // Ignored int // } // -// Returns an error if any of the fields can't be found, or -// if the same map or program is assigned multiple times. +// Returns an error if any of the eBPF objects can't be found, or +// if the same MapSpec or ProgramSpec is assigned multiple times. func (cs *CollectionSpec) Assign(to interface{}) error { - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { + // Assign() only supports assigning ProgramSpecs and MapSpecs, + // so doesn't load any resources into the kernel. + getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { + case reflect.TypeOf((*ProgramSpec)(nil)): - p := cs.Programs[name] - if p == nil { - return reflect.Value{}, fmt.Errorf("missing program %q", name) + if p := cs.Programs[name]; p != nil { + return p, nil } - return reflect.ValueOf(p), nil + return nil, fmt.Errorf("missing program %q", name) + case reflect.TypeOf((*MapSpec)(nil)): - m := cs.Maps[name] - if m == nil { - return reflect.Value{}, fmt.Errorf("missing map %q", name) + if m := cs.Maps[name]; m != nil { + return m, nil } - return reflect.ValueOf(m), nil + return nil, fmt.Errorf("missing map %q", name) + default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) + return nil, fmt.Errorf("unsupported type %s", typ) } } - return assignValues(to, valueOf) + return assignValues(to, getValue) } -// LoadAndAssign maps and programs into the kernel and assign them to a struct. +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. // -// This function is a short-cut to manually checking the presence -// of maps and programs in a collection spec. Consider using bpf2go if this -// sounds useful. 
+// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. // -// The argument to must be a pointer to a struct. A field of the -// struct is updated with values from Programs or Maps if it -// has an `ebpf` tag and its type is *Program or *Map. -// The tag gives the name of the program or map as found in -// the CollectionSpec. +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. // // struct { // Foo *ebpf.Program `ebpf:"xdp_foo"` @@ -196,39 +207,53 @@ func (cs *CollectionSpec) Assign(to interface{}) error { // opts may be nil. // // Returns an error if any of the fields can't be found, or -// if the same map or program is assigned multiple times. +// if the same Map or Program is assigned multiple times. func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { - if opts == nil { - opts = &CollectionOptions{} - } - - loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts) - defer cleanup() + loader := newCollectionLoader(cs, opts) + defer loader.cleanup() - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { + // Support assigning Programs and Maps, lazy-loading the required objects. + assignedMaps := make(map[string]bool) + getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { + case reflect.TypeOf((*Program)(nil)): - p, err := loadProgram(name) - if err != nil { - return reflect.Value{}, err - } - return reflect.ValueOf(p), nil + return loader.loadProgram(name) + case reflect.TypeOf((*Map)(nil)): - m, err := loadMap(name) - if err != nil { - return reflect.Value{}, err - } - return reflect.ValueOf(m), nil + assignedMaps[name] = true + return loader.loadMap(name) + default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) + return nil, fmt.Errorf("unsupported type %s", typ) } } - if err := assignValues(to, valueOf); err != nil { + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { return err } - done() + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateMaps(); err != nil { + return err + } + + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + switch m.typ { + case ProgramArray: + // Require all lazy-loaded ProgramArrays to be assigned to the given object. + // Without any references, they will be closed on the first GC and all tail + // calls into them will miss. + if !assignedMaps[n] { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + loader.finalize() + return nil } @@ -246,24 +271,32 @@ func NewCollection(spec *CollectionSpec) (*Collection, error) { // NewCollectionWithOptions creates a Collection from a specification. 
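// A minimal usage sketch (not from the vendored cilium/ebpf sources) of the
// CollectionSpec.LoadAndAssign pattern documented in the hunk above. The
// object file "prog.o" and the names "xdp_foo" and "bar" are hypothetical
// placeholders; only the objects referenced by the tagged struct are loaded
// into the kernel.
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("prog.o")
	if err != nil {
		log.Fatalf("loading collection spec: %v", err)
	}

	// Fields tagged `ebpf:"name"` are matched by name against the
	// ProgramSpecs and MapSpecs in the CollectionSpec.
	var objs struct {
		Foo *ebpf.Program `ebpf:"xdp_foo"`
		Bar *ebpf.Map     `ebpf:"bar"`
	}
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		log.Fatalf("loading objects: %v", err)
	}
	defer objs.Foo.Close()
	defer objs.Bar.Close()
}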
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { - loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts) - defer cleanup() + loader := newCollectionLoader(spec, &opts) + defer loader.cleanup() + // Create maps first, as their fds need to be linked into programs. for mapName := range spec.Maps { - _, err := loadMap(mapName) - if err != nil { + if _, err := loader.loadMap(mapName); err != nil { return nil, err } } for progName := range spec.Programs { - _, err := loadProgram(progName) - if err != nil { + if _, err := loader.loadProgram(progName); err != nil { return nil, err } } - maps, progs := done() + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. + if err := loader.populateMaps(); err != nil { + return nil, err + } + + maps, progs := loader.maps, loader.programs + + loader.finalize() + return &Collection{ progs, maps, @@ -314,113 +347,154 @@ func (hc handleCache) close() { for _, handle := range hc.btfHandles { handle.Close() } - hc.btfHandles = nil - hc.btfSpecs = nil } -func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) ( - loadMap func(string) (*Map, error), - loadProgram func(string) (*Program, error), - done func() (map[string]*Map, map[string]*Program), - cleanup func(), -) { - var ( - maps = make(map[string]*Map) - progs = make(map[string]*Program) - handles = newHandleCache() - skipMapsAndProgs = false - ) - - cleanup = func() { - handles.close() - - if skipMapsAndProgs { - return - } +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program + handles *handleCache +} - for _, m := range maps { - m.Close() - } +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) *collectionLoader { + if opts == nil { + opts = &CollectionOptions{} + } - for _, p := range progs { - p.Close() - } + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + newHandleCache(), + } +} + +// finalize should be called when all the collectionLoader's resources +// have been successfully loaded into the kernel and populated with values. +func (cl *collectionLoader) finalize() { + cl.maps, cl.programs = nil, nil +} + +// cleanup cleans up all resources left over in the collectionLoader. +// Call finalize() when Map and Program creation/population is successful +// to prevent them from getting closed. 
+func (cl *collectionLoader) cleanup() { + cl.handles.close() + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil + } + + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) } - done = func() (map[string]*Map, map[string]*Program) { - skipMapsAndProgs = true - return maps, progs + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) } - loadMap = func(mapName string) (*Map, error) { - if m := maps[mapName]; m != nil { - return m, nil + cl.maps[mapName] = m + return m, nil +} + +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } + + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } + + progSpec = progSpec.Copy() + + // Rewrite any reference to a valid map. + for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference == "" { + continue } - mapSpec := coll.Maps[mapName] - if mapSpec == nil { - return nil, fmt.Errorf("missing map %s", mapName) + if uint32(ins.Constant) != math.MaxUint32 { + // Don't overwrite maps already rewritten, users can + // rewrite programs in the spec themselves + continue } - m, err := newMapWithOptions(mapSpec, opts.Maps, handles) + m, err := cl.loadMap(ins.Reference) if err != nil { - return nil, fmt.Errorf("map %s: %w", mapName, err) + return nil, fmt.Errorf("program %s: %w", progName, err) } - maps[mapName] = m - return m, nil - } - - loadProgram = func(progName string) (*Program, error) { - if prog := progs[progName]; prog != nil { - return prog, nil + fd := m.FD() + if fd < 0 { + return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) } - - progSpec := coll.Programs[progName] - if progSpec == nil { - return nil, fmt.Errorf("unknown program %s", progName) + if err := ins.RewriteMapPtr(m.FD()); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err) } + } - progSpec = progSpec.Copy() + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } - // Rewrite any reference to a valid map. - for i := range progSpec.Instructions { - ins := &progSpec.Instructions[i] + cl.programs[progName] = prog + return prog, nil +} - if !ins.IsLoadFromMap() || ins.Reference == "" { - continue - } +func (cl *collectionLoader) populateMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } - if uint32(ins.Constant) != math.MaxUint32 { - // Don't overwrite maps already rewritten, users can - // rewrite programs in the spec themselves - continue - } + mapSpec = mapSpec.Copy() - m, err := loadMap(ins.Reference) - if err != nil { - return nil, fmt.Errorf("program %s: %w", progName, err) - } + // Replace any object stubs with loaded objects. + for i, kv := range mapSpec.Contents { + switch v := kv.Value.(type) { + case programStub: + // loadProgram is idempotent and could return an existing Program. 
+ prog, err := cl.loadProgram(string(v)) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", v, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} - fd := m.FD() - if fd < 0 { - return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd) - } - if err := ins.RewriteMapPtr(m.FD()); err != nil { - return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err) + case mapStub: + // loadMap is idempotent and could return an existing Map. + innerMap, err := cl.loadMap(string(v)) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", v, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} } } - prog, err := newProgramWithOptions(progSpec, opts.Programs, handles) - if err != nil { - return nil, fmt.Errorf("program %s: %w", progName, err) + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) } - - progs[progName] = prog - return prog, nil } - return + return nil } // LoadCollection parses an object file and converts it to a collection. @@ -466,108 +540,81 @@ func (coll *Collection) DetachProgram(name string) *Program { return p } -// Assign the contents of a collection to a struct. -// -// Deprecated: use CollectionSpec.Assign instead. It provides the same -// functionality but creates only the maps and programs requested. -func (coll *Collection) Assign(to interface{}) error { - assignedMaps := make(map[string]struct{}) - assignedPrograms := make(map[string]struct{}) - valueOf := func(typ reflect.Type, name string) (reflect.Value, error) { - switch typ { - case reflect.TypeOf((*Program)(nil)): - p := coll.Programs[name] - if p == nil { - return reflect.Value{}, fmt.Errorf("missing program %q", name) - } - assignedPrograms[name] = struct{}{} - return reflect.ValueOf(p), nil - case reflect.TypeOf((*Map)(nil)): - m := coll.Maps[name] - if m == nil { - return reflect.Value{}, fmt.Errorf("missing map %q", name) - } - assignedMaps[name] = struct{}{} - return reflect.ValueOf(m), nil - default: - return reflect.Value{}, fmt.Errorf("unsupported type %s", typ) - } - } - - if err := assignValues(to, valueOf); err != nil { - return err - } +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} - for name := range assignedPrograms { - coll.DetachProgram(name) +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. 
+func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) } - for name := range assignedMaps { - coll.DetachMap(name) + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) } - return nil -} - -func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error { - type structField struct { - reflect.StructField - value reflect.Value + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) } - var ( - fields []structField - visitedTypes = make(map[reflect.Type]bool) - flattenStruct func(reflect.Value) error - ) + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} - flattenStruct = func(structVal reflect.Value) error { - structType := structVal.Type() - if structType.Kind() != reflect.Struct { - return fmt.Errorf("%s is not a struct", structType) - } - - if visitedTypes[structType] { - return fmt.Errorf("recursion on type %s", structType) + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue } - for i := 0; i < structType.NumField(); i++ { - field := structField{structType.Field(i), structVal.Field(i)} - - name := field.Tag.Get("ebpf") - if name != "" { - fields = append(fields, field) + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. + var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { continue } - var err error - switch field.Type.Kind() { - case reflect.Ptr: - if field.Type.Elem().Kind() != reflect.Struct { - continue - } - - if field.value.IsNil() { - return fmt.Errorf("nil pointer to %s", structType) - } + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } - err = flattenStruct(field.value.Elem()) + // Obtain the destination type of the pointer. + v = field.value.Elem() - case reflect.Struct: - err = flattenStruct(field.value) + case reflect.Struct: + // Reference the value's type directly. + v = field.value - default: - continue - } + default: + continue + } - if err != nil { - return fmt.Errorf("field %s: %w", field.Name, err) - } + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) } - return nil + fields = append(fields, inner...) } + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. +// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. 
+func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + toValue := reflect.ValueOf(to) if toValue.Type().Kind() != reflect.Ptr { return fmt.Errorf("%T is not a pointer to struct", to) @@ -577,7 +624,8 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va return fmt.Errorf("nil pointer to %T", to) } - if err := flattenStruct(toValue.Elem()); err != nil { + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { return err } @@ -587,19 +635,23 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va name string } - assignedTo := make(map[elem]string) + assigned := make(map[elem]string) for _, field := range fields { - name := field.Tag.Get("ebpf") - if strings.Contains(name, ",") { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) } - e := elem{field.Type, name} - if assignedField := assignedTo[e]; assignedField != "" { - return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField) + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) } - value, err := valueOf(field.Type, name) + // Get the eBPF object referred to by the tag. + value, err := getValue(field.Type, tag) if err != nil { return fmt.Errorf("field %s: %w", field.Name, err) } @@ -607,9 +659,9 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va if !field.value.CanSet() { return fmt.Errorf("field %s: can't set value", field.Name) } + field.value.Set(reflect.ValueOf(value)) - field.value.Set(value) - assignedTo[e] = field.Name + assigned[e] = field.Name } return nil diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go index c2afbc36a5a..42010f43e58 100644 --- a/vendor/github.com/cilium/ebpf/elf_reader.go +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -19,7 +19,7 @@ import ( ) // elfCode is a convenience to reduce the amount of arguments that have to -// be passed around explicitly. You should treat it's contents as immutable. +// be passed around explicitly. You should treat its contents as immutable. 
type elfCode struct { *internal.SafeELFFile sections map[elf.SectionIndex]*elfSection @@ -188,7 +188,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load programs: %w", err) } - return &CollectionSpec{maps, progs}, nil + return &CollectionSpec{maps, progs, ec.ByteOrder}, nil } func loadLicense(sec *elf.Section) (string, error) { @@ -520,8 +520,12 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return fmt.Errorf("map %s: missing flags", mapName) } - if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil { - return fmt.Errorf("map %s: unknown and non-zero fields in definition", mapName) + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = *bytes.NewReader(extra) } if err := spec.clampPerfEventArraySize(); err != nil { @@ -535,6 +539,9 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return nil } +// loadBTFMaps iterates over all ELF sections marked as BTF map sections +// (like .maps) and parses them into MapSpecs. Dump the .maps section and +// any relocations with `readelf -x .maps -r `. func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { for _, sec := range ec.sections { if sec.kind != btfMapSection { @@ -545,33 +552,46 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return fmt.Errorf("missing BTF") } - _, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open())) - if err != nil { - return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported) - } - - var ds btf.Datasec + // Each section must appear as a DataSec in the ELF's BTF blob. + var ds *btf.Datasec if err := ec.btf.FindType(sec.Name, &ds); err != nil { return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) } + // Open a Reader to the ELF's raw section bytes so we can assert that all + // of them are zero on a per-map (per-Var) basis. For now, the section's + // sole purpose is to receive relocations, so all must be zero. + rs := sec.Open() + for _, vs := range ds.Vars { + // BPF maps are declared as and assigned to global variables, + // so iterate over each Var in the DataSec and validate their types. v, ok := vs.Type.(*btf.Var) if !ok { return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) } name := string(v.Name) + // The BTF metadata for each Var contains the full length of the map + // declaration, so read the corresponding amount of bytes from the ELF. + // This way, we can pinpoint which map declaration contains unexpected + // (and therefore unsupported) data. + _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) + if err != nil { + return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) + } + if maps[name] != nil { return fmt.Errorf("section %v: map %s already exists", sec.Name, name) } + // Each Var representing a BTF map definition contains a Struct. 
mapStruct, ok := v.Type.(*btf.Struct) if !ok { return fmt.Errorf("expected struct, got %s", v.Type) } - mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf) + mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) if err != nil { return fmt.Errorf("map %v: %w", name, err) } @@ -582,32 +602,52 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { maps[name] = mapSpec } + + // Drain the ELF section reader to make sure all bytes are accounted for + // with BTF metadata. + i, err := io.Copy(io.Discard, rs) + if err != nil { + return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) + } + if i > 0 { + return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + } } return nil } +// A programStub is a placeholder for a Program to be inserted at a certain map key. +// It needs to be resolved into a Program later on in the loader process. +type programStub string + +// A mapStub is a placeholder for a Map to be inserted at a certain map key. +// It needs to be resolved into a Map later on in the loader process. +type mapStub string + // mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing // a BTF map definition. The name and spec arguments will be copied to the // resulting MapSpec, and inner must be true on any resursive invocations. -func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) { - +func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { var ( - key, value btf.Type - keySize, valueSize uint32 - mapType, flags, maxEntries uint32 - pinType PinType - innerMapSpec *MapSpec - err error + key, value btf.Type + keySize, valueSize uint32 + mapType MapType + flags, maxEntries uint32 + pinType PinType + innerMapSpec *MapSpec + contents []MapKV + err error ) for i, member := range def.Members { switch member.Name { case "type": - mapType, err = uintFromBTF(member.Type) + mt, err := uintFromBTF(member.Type) if err != nil { return nil, fmt.Errorf("can't get type: %w", err) } + mapType = MapType(mt) case "map_flags": flags, err = uintFromBTF(member.Type) @@ -717,7 +757,7 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (* case *btf.Struct: // The values member pointing to an array of structs means we're expecting // a map-in-map declaration. - if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps { + if mapType != ArrayOfMaps && mapType != HashOfMaps { return nil, errors.New("outer map needs to be an array or a hash of maps") } if inner { @@ -731,21 +771,38 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (* // on kernels 5.2 and up) // Pass the BTF spec from the parent object, since both parent and // child must be created from the same BTF blob (on kernels that support BTF). - innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec) + innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) if err != nil { return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) } + case *btf.FuncProto: + // The values member contains an array of function pointers, meaning an + // autopopulated PROG_ARRAY. 
+ if mapType != ProgramArray { + return nil, errors.New("map needs to be a program array") + } + default: return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) } + contents, err = resolveBTFValuesContents(es, vs, member) + if err != nil { + return nil, fmt.Errorf("resolving values contents: %w", err) + } + default: return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) } } - bm := btf.NewMap(spec, key, value) + if key == nil { + key = &btf.Void{} + } + if value == nil { + value = &btf.Void{} + } return &MapSpec{ Name: SanitizeName(name, -1), @@ -754,9 +811,10 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (* ValueSize: valueSize, MaxEntries: maxEntries, Flags: flags, - BTF: &bm, + BTF: &btf.Map{Spec: spec, Key: key, Value: value}, Pinning: pinType, InnerMap: innerMapSpec, + Contents: contents, }, nil } @@ -793,6 +851,64 @@ func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { return ptr.Target, nil } +// resolveBTFValuesContents resolves relocations into ELF sections belonging +// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map +// definitions to extract static declarations of map contents. +func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { + // The elements of a .values pointer array are not encoded in BTF. + // Instead, relocations are generated into each array index. + // However, it's possible to leave certain array indices empty, so all + // indices' offsets need to be checked for emitted relocations. + + // The offset of the 'values' member within the _struct_ (in bits) + // is the starting point of the array. Convert to bytes. Add VarSecinfo + // offset to get the absolute position in the ELF blob. + start := (member.OffsetBits / 8) + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. + end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.SectionHeader.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. + for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. 
+ switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), programStub(r.Name)}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), mapStub(r.Name)}) + default: + return nil, fmt.Errorf("unknown relocation type %v", t) + } + } + + return contents, nil +} + func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { for _, sec := range ec.sections { if sec.kind != dataSection { @@ -809,9 +925,9 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { return errors.New("data sections require BTF, make sure all consts are marked as static") } - btfMap, err := ec.btf.Datasec(sec.Name) - if err != nil { - return err + var datasec *btf.Datasec + if err := ec.btf.FindType(sec.Name, &datasec); err != nil { + return fmt.Errorf("data section %s: can't get BTF: %w", sec.Name, err) } data, err := sec.Data() @@ -830,7 +946,7 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { ValueSize: uint32(len(data)), MaxEntries: 1, Contents: []MapKV{{uint32(0), data}}, - BTF: btfMap, + BTF: &btf.Map{Spec: ec.btf, Key: &btf.Void{}, Value: datasec}, } switch sec.Name { @@ -855,6 +971,8 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { }{ // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c "socket": {SocketFilter, AttachNone, 0}, + "sk_reuseport/migrate": {SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, + "sk_reuseport": {SkReuseport, AttachSkReuseportSelect, 0}, "seccomp": {SocketFilter, AttachNone, 0}, "kprobe/": {Kprobe, AttachNone, 0}, "uprobe/": {Kprobe, AttachNone, 0}, @@ -884,6 +1002,7 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, "sk_lookup/": {SkLookup, AttachSkLookup, 0}, + "freplace/": {Extension, AttachNone, 0}, "lsm/": {LSM, AttachLSMMac, 0}, "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, @@ -907,6 +1026,11 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, "classifier": {SchedCLS, AttachNone, 0}, "action": {SchedACT, AttachNone, 0}, + + "cgroup/getsockname4": {CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, + "cgroup/getsockname6": {CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, + "cgroup/getpeername4": {CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, + "cgroup/getpeername6": {CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, } for prefix, t := range types { diff --git a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go index d46d135f2fc..5f4e0a0ad02 100644 --- a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go +++ b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz // Use with https://github.com/dvyukov/go-fuzz diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go index b95131ef572..65fa4d7d850 100644 --- a/vendor/github.com/cilium/ebpf/info.go +++ b/vendor/github.com/cilium/ebpf/info.go @@ -12,6 +12,7 @@ import ( "time" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" ) // MapInfo describes a map. @@ -87,12 +88,16 @@ type ProgramInfo struct { Tag string // Name as supplied by user space at load time. Name string + // BTF for the program. 
+ btf btf.ID + // IDS map ids related to program. + ids []MapID stats *programStats } func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { - info, err := bpfGetProgInfoByFD(fd) + info, err := bpfGetProgInfoByFD(fd, nil) if errors.Is(err, syscall.EINVAL) { return newProgramInfoFromProc(fd) } @@ -100,6 +105,15 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { return nil, err } + var mapIDs []MapID + if info.nr_map_ids > 0 { + mapIDs = make([]MapID, info.nr_map_ids) + info, err = bpfGetProgInfoByFD(fd, mapIDs) + if err != nil { + return nil, err + } + } + return &ProgramInfo{ Type: ProgramType(info.prog_type), id: ProgramID(info.id), @@ -107,6 +121,8 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) { Tag: hex.EncodeToString(info.tag[:]), // name is available from 4.15. Name: internal.CString(info.name[:]), + btf: btf.ID(info.btf_id), + ids: mapIDs, stats: &programStats{ runtime: time.Duration(info.run_time_ns), runCount: info.run_cnt, @@ -142,6 +158,17 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) { return pi.id, pi.id > 0 } +// BTFID returns the BTF ID associated with the program. +// +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the program was loaded without BTF information.) +func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + // RunCount returns the total number of times the program was called. // // Can return 0 if the collection of statistics is not enabled. See EnableStats(). @@ -164,6 +191,13 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) { return time.Duration(0), false } +// MapIDs returns the maps related to the program. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.ids, pi.ids != nil +} + func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error { raw, err := fd.Value() if err != nil { diff --git a/vendor/github.com/cilium/ebpf/internal/align.go b/vendor/github.com/cilium/ebpf/internal/align.go new file mode 100644 index 00000000000..8b4f2658eac --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/align.go @@ -0,0 +1,6 @@ +package internal + +// Align returns 'n' updated to 'alignment' boundary. +func Align(n, alignment int) int { + return (int(n) + alignment - 1) / alignment * alignment +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go index 5da9e11921a..2b5f6d226a4 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/btf.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/btf.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "os" "reflect" @@ -27,12 +26,15 @@ var ( ErrNoExtendedInfo = errors.New("no extended info") ) +// ID represents the unique ID of a BTF object. +type ID uint32 + // Spec represents decoded BTF. 
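// A minimal sketch (not from the vendored cilium/ebpf sources) of the new
// ProgramInfo.BTFID and ProgramInfo.MapIDs accessors added in the info.go
// hunk above. The package name ebpfutil and the describeProgram helper are
// hypothetical; both accessors report optional fields that may be unavailable
// on older kernels, and NewMapFromID requires sufficient privileges.
package ebpfutil

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func describeProgram(prog *ebpf.Program) error {
	info, err := prog.Info()
	if err != nil {
		return err
	}

	if id, ok := info.BTFID(); ok {
		fmt.Println("program BTF id:", id)
	}

	if ids, ok := info.MapIDs(); ok {
		for _, id := range ids {
			m, err := ebpf.NewMapFromID(id)
			if err != nil {
				return err
			}
			fmt.Println("references map:", m)
			m.Close()
		}
	}
	return nil
}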
type Spec struct { rawTypes []rawType strings stringTable types []Type - namedTypes map[string][]namedType + namedTypes map[string][]NamedType funcInfos map[string]extInfo lineInfos map[string]extInfo coreRelos map[string]coreRelos @@ -61,15 +63,6 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { } defer file.Close() - btfSection, btfExtSection, sectionSizes, err := findBtfSections(file) - if err != nil { - return nil, err - } - - if btfSection == nil { - return nil, fmt.Errorf("btf: %w", ErrNotFound) - } - symbols, err := file.Symbols() if err != nil { return nil, fmt.Errorf("can't read symbols: %v", err) @@ -87,10 +80,6 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { } secName := file.Sections[symbol.Section].Name - if _, ok := sectionSizes[secName]; !ok { - continue - } - if symbol.Value > math.MaxUint32 { return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) } @@ -98,24 +87,10 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) } - spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - if btfExtSection == nil { - return spec, nil - } - - spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) - if err != nil { - return nil, fmt.Errorf("can't read ext info: %w", err) - } - - return spec, nil + return loadSpecFromELF(file, variableOffsets) } -func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) { +func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]uint32) (*Spec, error) { var ( btfSection *elf.Section btfExtSection *elf.Section @@ -134,33 +109,45 @@ func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, ma } if sec.Size > math.MaxUint32 { - return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) } sectionSizes[sec.Name] = uint32(sec.Size) } } - return btfSection, btfExtSection, sectionSizes, nil -} -func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) if err != nil { return nil, err } - defer file.Close() - btfSection, _, _, err := findBtfSections(file) - if err != nil { - return nil, fmt.Errorf(".BTF ELF section: %s", err) + if btfExtSection == nil { + return spec, nil } - if btfSection == nil { - return nil, fmt.Errorf("unable to find .BTF ELF section") + + spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings) + if err != nil { + return nil, fmt.Errorf("can't read ext info: %w", err) } - return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil) + + return spec, nil +} + +// LoadRawSpec reads a blob of BTF data that isn't wrapped in an ELF file. +// +// Prefer using LoadSpecFromReader, since this function only supports a subset +// of BTF. +func LoadRawSpec(btf io.Reader, bo binary.ByteOrder) (*Spec, error) { + // This will return an error if we encounter a Datasec, since we can't fix + // it up. 
+ return loadRawSpec(btf, bo, nil, nil) } -func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { +func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { rawTypes, rawStrings, err := parseBTF(btf, bo) if err != nil { return nil, err @@ -217,7 +204,7 @@ func loadKernelSpec() (*Spec, error) { if err == nil { defer fh.Close() - return loadNakedSpec(fh, internal.NativeEndian, nil, nil) + return LoadRawSpec(fh, internal.NativeEndian) } // use same list of locations as libbpf @@ -241,14 +228,20 @@ func loadKernelSpec() (*Spec, error) { } defer fh.Close() - return loadSpecFromVmlinux(fh) + file, err := internal.NewSafeELFFile(fh) + if err != nil { + return nil, err + } + defer file.Close() + + return loadSpecFromELF(file, nil) } return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) } -func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) { - rawBTF, err := ioutil.ReadAll(btf) +func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) { + rawBTF, err := io.ReadAll(btf) if err != nil { return nil, nil, fmt.Errorf("can't read BTF: %v", err) } @@ -357,6 +350,30 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s return nil } +// Copy creates a copy of Spec. +func (s *Spec) Copy() *Spec { + types, _ := copyTypes(s.types, nil) + namedTypes := make(map[string][]NamedType) + for _, typ := range types { + if named, ok := typ.(NamedType); ok { + name := essentialName(named.TypeName()) + namedTypes[name] = append(namedTypes[name], named) + } + } + + // NB: Other parts of spec are not copied since they are immutable. + return &Spec{ + s.rawTypes, + s.strings, + types, + namedTypes, + s.funcInfos, + s.lineInfos, + s.coreRelos, + s.byteOrder, + } +} + type marshalOpts struct { ByteOrder binary.ByteOrder StripFuncLinkage bool @@ -447,36 +464,37 @@ func (s *Spec) Program(name string, length uint64) (*Program, error) { return &Program{s, length, funcInfos, lineInfos, relos}, nil } -// Datasec returns the BTF required to create maps which represent data sections. -func (s *Spec) Datasec(name string) (*Map, error) { - var datasec Datasec - if err := s.FindType(name, &datasec); err != nil { - return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err) - } - - m := NewMap(s, &Void{}, &datasec) - return &m, nil -} - // FindType searches for a type with a specific name. // -// hint determines the type of the returned Type. +// Called T a type that satisfies Type, typ must be a non-nil **T. +// On success, the address of the found type will be copied in typ. // // Returns an error wrapping ErrNotFound if no matching // type exists in spec. 
-func (s *Spec) FindType(name string, typ Type) error { - var ( - wanted = reflect.TypeOf(typ) - candidate Type - ) +func (s *Spec) FindType(name string, typ interface{}) error { + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + wanted := typPtr.Type() + if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + var candidate Type for _, typ := range s.namedTypes[essentialName(name)] { if reflect.TypeOf(typ) != wanted { continue } // Match against the full name, not just the essential one. - if typ.name() != name { + if typ.TypeName() != name { continue } @@ -491,15 +509,15 @@ func (s *Spec) FindType(name string, typ Type) error { return fmt.Errorf("type %s: %w", name, ErrNotFound) } - cpy, _ := copyType(candidate, nil) - value := reflect.Indirect(reflect.ValueOf(cpy)) - reflect.Indirect(reflect.ValueOf(typ)).Set(value) + typPtr.Set(reflect.ValueOf(candidate)) + return nil } // Handle is a reference to BTF loaded into the kernel. type Handle struct { - fd *internal.FD + spec *Spec + fd *internal.FD } // NewHandle loads BTF into the kernel. @@ -541,7 +559,32 @@ func NewHandle(spec *Spec) (*Handle, error) { return nil, internal.ErrorWithLog(err, logBuf, logErr) } - return &Handle{fd}, nil + return &Handle{spec.Copy(), fd}, nil +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. +func NewHandleFromID(id ID) (*Handle, error) { + fd, err := internal.BPFObjGetFDByID(internal.BPF_BTF_GET_FD_BY_ID, uint32(id)) + if err != nil { + return nil, fmt.Errorf("get BTF by id: %w", err) + } + + info, err := newInfoFromFd(fd) + if err != nil { + _ = fd.Close() + return nil, fmt.Errorf("get BTF spec for handle: %w", err) + } + + return &Handle{info.BTF, fd}, nil +} + +// Spec returns the Spec that defined the BTF loaded into the kernel. +func (h *Handle) Spec() *Spec { + return h.spec } // Close destroys the handle. @@ -563,43 +606,8 @@ func (h *Handle) FD() int { // Map is the BTF for a map. type Map struct { - spec *Spec - key, value Type -} - -// NewMap returns a new Map containing the given values. -// The key and value arguments are initialized to Void if nil values are given. -func NewMap(spec *Spec, key Type, value Type) Map { - if key == nil { - key = &Void{} - } - if value == nil { - value = &Void{} - } - - return Map{ - spec: spec, - key: key, - value: value, - } -} - -// MapSpec should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapSpec(m *Map) *Spec { - return m.spec -} - -// MapKey should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapKey(m *Map) Type { - return m.key -} - -// MapValue should be a method on Map, but is a free function -// to hide it from users of the ebpf package. -func MapValue(m *Map) Type { - return m.value + Spec *Spec + Key, Value Type } // Program is the BTF information for a stream of instructions. @@ -610,68 +618,59 @@ type Program struct { coreRelos coreRelos } -// ProgramSpec returns the Spec needed for loading function and line infos into the kernel. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. 
-func ProgramSpec(s *Program) *Spec { - return s.spec +// Spec returns the BTF spec of this program. +func (p *Program) Spec() *Spec { + return p.spec } -// ProgramAppend the information from other to the Program. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramAppend(s, other *Program) error { - funcInfos, err := s.funcInfos.append(other.funcInfos, s.length) +// Append the information from other to the Program. +func (p *Program) Append(other *Program) error { + if other.spec != p.spec { + return fmt.Errorf("can't append program with different BTF specs") + } + + funcInfos, err := p.funcInfos.append(other.funcInfos, p.length) if err != nil { return fmt.Errorf("func infos: %w", err) } - lineInfos, err := s.lineInfos.append(other.lineInfos, s.length) + lineInfos, err := p.lineInfos.append(other.lineInfos, p.length) if err != nil { return fmt.Errorf("line infos: %w", err) } - s.funcInfos = funcInfos - s.lineInfos = lineInfos - s.coreRelos = s.coreRelos.append(other.coreRelos, s.length) - s.length += other.length + p.funcInfos = funcInfos + p.lineInfos = lineInfos + p.coreRelos = p.coreRelos.append(other.coreRelos, p.length) + p.length += other.length return nil } -// ProgramFuncInfos returns the binary form of BTF function infos. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) { - bytes, err = s.funcInfos.MarshalBinary() +// FuncInfos returns the binary form of BTF function infos. +func (p *Program) FuncInfos() (recordSize uint32, bytes []byte, err error) { + bytes, err = p.funcInfos.MarshalBinary() if err != nil { - return 0, nil, err + return 0, nil, fmt.Errorf("func infos: %w", err) } - return s.funcInfos.recordSize, bytes, nil + return p.funcInfos.recordSize, bytes, nil } -// ProgramLineInfos returns the binary form of BTF line infos. -// -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) { - bytes, err = s.lineInfos.MarshalBinary() +// LineInfos returns the binary form of BTF line infos. +func (p *Program) LineInfos() (recordSize uint32, bytes []byte, err error) { + bytes, err = p.lineInfos.MarshalBinary() if err != nil { - return 0, nil, err + return 0, nil, fmt.Errorf("line infos: %w", err) } - return s.lineInfos.recordSize, bytes, nil + return p.lineInfos.recordSize, bytes, nil } -// ProgramFixups returns the changes required to adjust the program to the target. +// Fixups returns the changes required to adjust the program to the target. // -// This is a free function instead of a method to hide it from users -// of package ebpf. -func ProgramFixups(s *Program, target *Spec) (COREFixups, error) { - if len(s.coreRelos) == 0 { +// Passing a nil target will relocate against the running kernel. 
+func (p *Program) Fixups(target *Spec) (COREFixups, error) { + if len(p.coreRelos) == 0 { return nil, nil } @@ -683,7 +682,7 @@ func ProgramFixups(s *Program, target *Spec) (COREFixups, error) { } } - return coreRelocate(s.spec, target, s.coreRelos) + return coreRelocate(p.spec, target, p.coreRelos) } type bpfLoadBTFAttr struct { diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go index a5ef9451201..d98c73ca594 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go @@ -31,6 +31,8 @@ const ( // Added ~5.1 kindVar kindDatasec + // Added ~5.13 + kindFloat ) // FuncLinkage describes BTF function linkage metadata. @@ -54,7 +56,7 @@ const ( const ( btfTypeKindShift = 24 - btfTypeKindLen = 4 + btfTypeKindLen = 5 btfTypeVlenShift = 0 btfTypeVlenMask = 16 btfTypeKindFlagShift = 31 @@ -67,8 +69,8 @@ type btfType struct { /* "info" bits arrangement * bits 0-15: vlen (e.g. # of struct's members), linkage * bits 16-23: unused - * bits 24-27: kind (e.g. int, ptr, array...etc) - * bits 28-30: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused * bit 31: kind_flag, currently used by * struct, union and fwd */ @@ -117,6 +119,8 @@ func (k btfKind) String() string { return "Variable" case kindDatasec: return "Section" + case kindFloat: + return "Float" default: return fmt.Sprintf("Unknown (%d)", k) } @@ -260,6 +264,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { data = new(btfVariable) case kindDatasec: data = make([]btfVarSecinfo, header.Vlen()) + case kindFloat: default: return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) } diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go index 7c888f602d0..d02df9d50bb 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/core.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/core.go @@ -234,13 +234,13 @@ func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) { } localType := local.types[id] - named, ok := localType.(namedType) - if !ok || named.name() == "" { + named, ok := localType.(NamedType) + if !ok || named.TypeName() == "" { return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) } relos := relosByID[id] - targets := target.namedTypes[named.essentialName()] + targets := target.namedTypes[essentialName(named.TypeName())] fixups, err := coreCalculateFixups(localType, targets, relos) if err != nil { return nil, fmt.Errorf("relocate %s: %w", localType, err) @@ -262,7 +262,7 @@ var errImpossibleRelocation = errors.New("impossible relocation") // // The best target is determined by scoring: the less poisoning we have to do // the better the target is. -func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) { +func coreCalculateFixups(local Type, targets []NamedType, relos coreRelos) ([]COREFixup, error) { localID := local.ID() local, err := copyType(local, skipQualifierAndTypedef) if err != nil { @@ -467,8 +467,8 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) { return nil, fmt.Errorf("empty accessor") } - var result coreAccessor parts := strings.Split(accessor, ":") + result := make(coreAccessor, 0, len(parts)) for _, part := range parts { // 31 bits to avoid overflowing int on 32 bit platforms. 
index, err := strconv.ParseUint(part, 10, 31) @@ -564,7 +564,7 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie // This is an anonymous struct or union, ignore it. local = localMember.Type - localOffset += localMember.Offset + localOffset += localMember.OffsetBits localMaybeFlex = false continue } @@ -585,10 +585,10 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie local = localMember.Type localMaybeFlex = acc == len(localMembers)-1 - localOffset += localMember.Offset + localOffset += localMember.OffsetBits target = targetMember.Type targetMaybeFlex = last - targetOffset += targetMember.Offset + targetOffset += targetMember.OffsetBits case *Array: // For arrays, acc is the index in the target. @@ -639,7 +639,7 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie // coreFindMember finds a member in a composite type while handling anonymous // structs and unions. -func coreFindMember(typ composite, name Name) (Member, bool, error) { +func coreFindMember(typ composite, name string) (Member, bool, error) { if name == "" { return Member{}, false, errors.New("can't search for anonymous member") } @@ -670,7 +670,7 @@ func coreFindMember(typ composite, name Name) (Member, bool, error) { for j, member := range members { if member.Name == name { // NB: This is safe because member is a copy. - member.Offset += target.offset + member.OffsetBits += target.offset return member, j == len(members)-1, nil } @@ -685,7 +685,7 @@ func coreFindMember(typ composite, name Name) (Member, bool, error) { return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) } - targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) + targets = append(targets, offsetTarget{comp, target.offset + member.OffsetBits}) } } @@ -704,9 +704,9 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal return nil, nil, errImpossibleRelocation } - localName := localValue.Name.essentialName() + localName := essentialName(localValue.Name) for i, targetValue := range targetEnum.Values { - if targetValue.Name.essentialName() != localName { + if essentialName(targetValue.Name) != localName { continue } @@ -813,6 +813,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) error { * least one of enums should be anonymous; * - for ENUMs, check sizes, names are ignored; * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * [ NB: coreAreMembersCompatible doesn't recurse, this check is done @@ -848,16 +849,16 @@ func coreAreMembersCompatible(localType Type, targetType Type) error { } switch lv := localType.(type) { - case *Array, *Pointer: + case *Array, *Pointer, *Float: return nil case *Enum: tv := targetType.(*Enum) - return doNamesMatch(lv.name(), tv.name()) + return doNamesMatch(lv.Name, tv.Name) case *Fwd: tv := targetType.(*Fwd) - return doNamesMatch(lv.name(), tv.name()) + return doNamesMatch(lv.Name, tv.Name) case *Int: tv := targetType.(*Int) diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go index beba1bce698..cdae2ec4082 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "github.com/cilium/ebpf/asm" 
"github.com/cilium/ebpf/internal" @@ -64,7 +63,7 @@ func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (f // Of course, the .BTF.ext header has different semantics than the // .BTF ext header. We need to ignore non-null values. - _, err = io.CopyN(ioutil.Discard, r, remainder) + _, err = io.CopyN(io.Discard, r, remainder) if err != nil { return nil, nil, nil, fmt.Errorf("header padding: %v", err) } @@ -114,11 +113,16 @@ type extInfoRecord struct { } type extInfo struct { + byteOrder binary.ByteOrder recordSize uint32 records []extInfoRecord } func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { + if other.byteOrder != ei.byteOrder { + return extInfo{}, fmt.Errorf("ext_info byte order mismatch, want %v (got %v)", ei.byteOrder, other.byteOrder) + } + if other.recordSize != ei.recordSize { return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize) } @@ -131,10 +135,14 @@ func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) { Opaque: info.Opaque, }) } - return extInfo{ei.recordSize, records}, nil + return extInfo{ei.byteOrder, ei.recordSize, records}, nil } func (ei extInfo) MarshalBinary() ([]byte, error) { + if ei.byteOrder != internal.NativeEndian { + return nil, fmt.Errorf("%s is not the native byte order", ei.byteOrder) + } + if len(ei.records) == 0 { return nil, nil } @@ -197,6 +205,7 @@ func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[st } result[secName] = extInfo{ + bo, recordSize, records, } diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go index 37e043fd378..220b285afe0 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz // Use with https://github.com/dvyukov/go-fuzz diff --git a/vendor/github.com/cilium/ebpf/internal/btf/info.go b/vendor/github.com/cilium/ebpf/internal/btf/info.go new file mode 100644 index 00000000000..6a9b5d2e0ba --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/info.go @@ -0,0 +1,48 @@ +package btf + +import ( + "bytes" + + "github.com/cilium/ebpf/internal" +) + +// info describes a BTF object. +type info struct { + BTF *Spec + ID ID + // Name is an identifying name for the BTF, currently only used by the + // kernel. + Name string + // KernelBTF is true if the BTf originated with the kernel and not + // userspace. + KernelBTF bool +} + +func newInfoFromFd(fd *internal.FD) (*info, error) { + // We invoke the syscall once with a empty BTF and name buffers to get size + // information to allocate buffers. Then we invoke it a second time with + // buffers to receive the data. 
+ bpfInfo, err := bpfGetBTFInfoByFD(fd, nil, nil) + if err != nil { + return nil, err + } + + btfBuffer := make([]byte, bpfInfo.btfSize) + nameBuffer := make([]byte, bpfInfo.nameLen) + bpfInfo, err = bpfGetBTFInfoByFD(fd, btfBuffer, nameBuffer) + if err != nil { + return nil, err + } + + spec, err := loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil) + if err != nil { + return nil, err + } + + return &info{ + BTF: spec, + ID: ID(bpfInfo.id), + Name: internal.CString(nameBuffer), + KernelBTF: bpfInfo.kernelBTF != 0, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/vendor/github.com/cilium/ebpf/internal/btf/strings.go index 8782643a043..9876aa227c0 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/strings.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/strings.go @@ -5,13 +5,12 @@ import ( "errors" "fmt" "io" - "io/ioutil" ) type stringTable []byte func readStringTable(r io.Reader) (stringTable, error) { - contents, err := ioutil.ReadAll(r) + contents, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("can't read string table: %v", err) } @@ -53,8 +52,3 @@ func (st stringTable) Lookup(offset uint32) (string, error) { return string(str[:end]), nil } - -func (st stringTable) LookupName(offset uint32) (Name, error) { - str, err := st.Lookup(offset) - return Name(str), err -} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go b/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go new file mode 100644 index 00000000000..a4f80abd011 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/btf/syscalls.go @@ -0,0 +1,31 @@ +package btf + +import ( + "fmt" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +type bpfBTFInfo struct { + btf internal.Pointer + btfSize uint32 + id uint32 + name internal.Pointer + nameLen uint32 + kernelBTF uint32 +} + +func bpfGetBTFInfoByFD(fd *internal.FD, btf, name []byte) (*bpfBTFInfo, error) { + info := bpfBTFInfo{ + btf: internal.NewSlicePointer(btf), + btfSize: uint32(len(btf)), + name: internal.NewSlicePointer(name), + nameLen: uint32(len(name)), + } + if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { + return nil, fmt.Errorf("can't get program info: %w", err) + } + + return &info, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go index 62aa31bcd78..5c8e7c6e59d 100644 --- a/vendor/github.com/cilium/ebpf/internal/btf/types.go +++ b/vendor/github.com/cilium/ebpf/internal/btf/types.go @@ -30,27 +30,26 @@ type Type interface { walk(*typeDeque) } -// namedType is a type with a name. -// -// Most named types simply embed Name. -type namedType interface { +// NamedType is a type with a name. +type NamedType interface { Type - name() string - essentialName() string -} - -// Name identifies a type. -// -// Anonymous types have an empty name. -type Name string -func (n Name) name() string { - return string(n) + // Name of the type, empty for anonymous types. + TypeName() string } -func (n Name) essentialName() string { - return essentialName(string(n)) -} +var ( + _ NamedType = (*Int)(nil) + _ NamedType = (*Struct)(nil) + _ NamedType = (*Union)(nil) + _ NamedType = (*Enum)(nil) + _ NamedType = (*Fwd)(nil) + _ NamedType = (*Func)(nil) + _ NamedType = (*Typedef)(nil) + _ NamedType = (*Var)(nil) + _ NamedType = (*Datasec)(nil) + _ NamedType = (*Float)(nil) +) // Void is the unit type of BTF. 
type Void struct{} @@ -72,19 +71,17 @@ const ( // Int is an integer of a given length. type Int struct { TypeID - Name + Name string // The size of the integer in bytes. Size uint32 Encoding IntEncoding - // Offset is the starting bit offset. Currently always 0. + // OffsetBits is the starting bit offset. Currently always 0. // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int - Offset uint32 - Bits byte + OffsetBits uint32 + Bits byte } -var _ namedType = (*Int)(nil) - func (i *Int) String() string { var s strings.Builder @@ -110,15 +107,16 @@ func (i *Int) String() string { return s.String() } -func (i *Int) size() uint32 { return i.Size } -func (i *Int) walk(*typeDeque) {} +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) walk(*typeDeque) {} func (i *Int) copy() Type { cpy := *i return &cpy } func (i *Int) isBitfield() bool { - return i.Offset > 0 + return i.OffsetBits > 0 } // Pointer is a pointer to another type. @@ -158,7 +156,7 @@ func (arr *Array) copy() Type { // Struct is a compound type of consecutive members. type Struct struct { TypeID - Name + Name string // The size of the struct including padding, in bytes Size uint32 Members []Member @@ -168,6 +166,8 @@ func (s *Struct) String() string { return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name) } +func (s *Struct) TypeName() string { return s.Name } + func (s *Struct) size() uint32 { return s.Size } func (s *Struct) walk(tdq *typeDeque) { @@ -189,7 +189,7 @@ func (s *Struct) members() []Member { // Union is a compound type where members occupy the same memory. type Union struct { TypeID - Name + Name string // The size of the union including padding, in bytes. Size uint32 Members []Member @@ -199,6 +199,8 @@ func (u *Union) String() string { return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name) } +func (u *Union) TypeName() string { return u.Name } + func (u *Union) size() uint32 { return u.Size } func (u *Union) walk(tdq *typeDeque) { @@ -236,17 +238,17 @@ var ( // // It is not a valid Type. type Member struct { - Name + Name string Type Type - // Offset is the bit offset of this member - Offset uint32 + // OffsetBits is the bit offset of this member. + OffsetBits uint32 BitfieldSize uint32 } // Enum lists possible values. type Enum struct { TypeID - Name + Name string Values []EnumValue } @@ -254,11 +256,13 @@ func (e *Enum) String() string { return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name) } +func (e *Enum) TypeName() string { return e.Name } + // EnumValue is part of an Enum // // Is is not a valid Type type EnumValue struct { - Name + Name string Value int32 } @@ -294,7 +298,7 @@ func (fk FwdKind) String() string { // Fwd is a forward declaration of a Type. type Fwd struct { TypeID - Name + Name string Kind FwdKind } @@ -302,6 +306,8 @@ func (f *Fwd) String() string { return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name) } +func (f *Fwd) TypeName() string { return f.Name } + func (f *Fwd) walk(*typeDeque) {} func (f *Fwd) copy() Type { cpy := *f @@ -311,7 +317,7 @@ func (f *Fwd) copy() Type { // Typedef is an alias of a Type. 
type Typedef struct { TypeID - Name + Name string Type Type } @@ -319,6 +325,8 @@ func (td *Typedef) String() string { return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID()) } +func (td *Typedef) TypeName() string { return td.Name } + func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } func (td *Typedef) copy() Type { cpy := *td @@ -379,7 +387,7 @@ func (r *Restrict) copy() Type { // Func is a function definition. type Func struct { TypeID - Name + Name string Type Type Linkage FuncLinkage } @@ -388,6 +396,8 @@ func (f *Func) String() string { return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID()) } +func (f *Func) TypeName() string { return f.Name } + func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } func (f *Func) copy() Type { cpy := *f @@ -426,14 +436,14 @@ func (fp *FuncProto) copy() Type { } type FuncParam struct { - Name + Name string Type Type } // Var is a global variable. type Var struct { TypeID - Name + Name string Type Type Linkage VarLinkage } @@ -442,6 +452,8 @@ func (v *Var) String() string { return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name) } +func (v *Var) TypeName() string { return v.Name } + func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } func (v *Var) copy() Type { cpy := *v @@ -451,7 +463,7 @@ func (v *Var) copy() Type { // Datasec is a global program section containing data. type Datasec struct { TypeID - Name + Name string Size uint32 Vars []VarSecinfo } @@ -460,6 +472,8 @@ func (ds *Datasec) String() string { return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name) } +func (ds *Datasec) TypeName() string { return ds.Name } + func (ds *Datasec) size() uint32 { return ds.Size } func (ds *Datasec) walk(tdq *typeDeque) { @@ -475,7 +489,7 @@ func (ds *Datasec) copy() Type { return &cpy } -// VarSecinfo describes variable in a Datasec +// VarSecinfo describes variable in a Datasec. // // It is not a valid Type. type VarSecinfo struct { @@ -484,6 +498,27 @@ type VarSecinfo struct { Size uint32 } +// Float is a float of a given length. +type Float struct { + TypeID + Name string + + // The size of the float in bytes. + Size uint32 +} + +func (f *Float) String() string { + return fmt.Sprintf("float%d#%d[%q]", f.Size*8, f.TypeID, f.Name) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) walk(*typeDeque) {} +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + type sizer interface { size() uint32 } @@ -565,14 +600,36 @@ func Sizeof(typ Type) (int, error) { // // Returns any errors from transform verbatim. func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { - var ( - copies = make(map[Type]Type) - work typeDeque - ) + copies := make(copier) + return typ, copies.copy(&typ, transform) +} - for t := &typ; t != nil; t = work.pop() { +// copy a slice of Types recursively. +// +// Types may form a cycle. +// +// Returns any errors from transform verbatim. +func copyTypes(types []Type, transform func(Type) (Type, error)) ([]Type, error) { + result := make([]Type, len(types)) + copy(result, types) + + copies := make(copier) + for i := range result { + if err := copies.copy(&result[i], transform); err != nil { + return nil, err + } + } + + return result, nil +} + +type copier map[Type]Type + +func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error { + var work typeDeque + for t := typ; t != nil; t = work.pop() { // *t is the identity of the type. 
- if cpy := copies[*t]; cpy != nil { + if cpy := c[*t]; cpy != nil { *t = cpy continue } @@ -581,21 +638,21 @@ func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { if transform != nil { tf, err := transform(*t) if err != nil { - return nil, fmt.Errorf("copy %s: %w", typ, err) + return fmt.Errorf("copy %s: %w", *t, err) } cpy = tf.copy() } else { cpy = (*t).copy() } - copies[*t] = cpy + c[*t] = cpy *t = cpy // Mark any nested types for copying. cpy.walk(&work) } - return typ, nil + return nil } // typeDeque keeps track of pointers to types which still @@ -606,6 +663,10 @@ type typeDeque struct { mask uint64 } +func (dq *typeDeque) empty() bool { + return dq.read == dq.write +} + // push adds a type to the stack. func (dq *typeDeque) push(t *Type) { if dq.write-dq.read < uint64(len(dq.types)) { @@ -632,7 +693,7 @@ func (dq *typeDeque) push(t *Type) { // shift returns the first element or null. func (dq *typeDeque) shift() *Type { - if dq.read == dq.write { + if dq.empty() { return nil } @@ -645,7 +706,7 @@ func (dq *typeDeque) shift() *Type { // pop returns the last element or null. func (dq *typeDeque) pop() *Type { - if dq.read == dq.write { + if dq.empty() { return nil } @@ -674,7 +735,7 @@ func (dq *typeDeque) all() []*Type { // Returns a map of named types (so, where NameOff is non-zero) and a slice of types // indexed by TypeID. Since BTF ignores compilation units, multiple types may share // the same name. A Type may form a cyclic graph by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) { +func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]NamedType, err error) { type fixupDef struct { id TypeID expectedKind btfKind @@ -691,17 +752,17 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, // work, since otherwise append might re-allocate members. 
members := make([]Member, 0, len(raw)) for i, btfMember := range raw { - name, err := rawStrings.LookupName(btfMember.NameOff) + name, err := rawStrings.Lookup(btfMember.NameOff) if err != nil { return nil, fmt.Errorf("can't get name for member %d: %w", i, err) } m := Member{ - Name: name, - Offset: btfMember.Offset, + Name: name, + OffsetBits: btfMember.Offset, } if kindFlag { m.BitfieldSize = btfMember.Offset >> 24 - m.Offset &= 0xffffff + m.OffsetBits &= 0xffffff } members = append(members, m) } @@ -713,7 +774,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, types = make([]Type, 0, len(rawTypes)) types = append(types, (*Void)(nil)) - namedTypes = make(map[string][]namedType) + namedTypes = make(map[string][]NamedType) for i, raw := range rawTypes { var ( @@ -723,7 +784,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, typ Type ) - name, err := rawStrings.LookupName(raw.NameOff) + name, err := rawStrings.Lookup(raw.NameOff) if err != nil { return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) } @@ -765,7 +826,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, rawvals := raw.data.([]btfEnum) vals := make([]EnumValue, 0, len(rawvals)) for i, btfVal := range rawvals { - name, err := rawStrings.LookupName(btfVal.NameOff) + name, err := rawStrings.Lookup(btfVal.NameOff) if err != nil { return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) } @@ -812,7 +873,7 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, rawparams := raw.data.([]btfParam) params := make([]FuncParam, 0, len(rawparams)) for i, param := range rawparams { - name, err := rawStrings.LookupName(param.NameOff) + name, err := rawStrings.Lookup(param.NameOff) if err != nil { return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) } @@ -848,14 +909,17 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, } typ = &Datasec{id, name, raw.SizeType, vars} + case kindFloat: + typ = &Float{id, name, raw.Size()} + default: return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) } types = append(types, typ) - if named, ok := typ.(namedType); ok { - if name := essentialName(named.name()); name != "" { + if named, ok := typ.(NamedType); ok { + if name := essentialName(named.TypeName()); name != "" { namedTypes[name] = append(namedTypes[name], named) } } diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go index d3424ba4345..3affa1efb9d 100644 --- a/vendor/github.com/cilium/ebpf/internal/cpu.go +++ b/vendor/github.com/cilium/ebpf/internal/cpu.go @@ -2,7 +2,7 @@ package internal import ( "fmt" - "io/ioutil" + "os" "strings" "sync" ) @@ -24,7 +24,7 @@ func PossibleCPUs() (int, error) { } func parseCPUsFromFile(path string) (int, error) { - spec, err := ioutil.ReadFile(path) + spec, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go index a56fbcc8e01..8c114ddf476 100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go +++ b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go @@ -1,3 +1,4 @@ +//go:build armbe || mips || mips64p32 // +build armbe mips mips64p32 package internal diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go index be2ecfca731..e65a61e45d3 
100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go +++ b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go @@ -1,3 +1,4 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le // +build 386 amd64p32 arm mipsle mips64p32le package internal diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/ptr_64.go index 69452dceb9a..71a3afe307b 100644 --- a/vendor/github.com/cilium/ebpf/internal/ptr_64.go +++ b/vendor/github.com/cilium/ebpf/internal/ptr_64.go @@ -1,5 +1,5 @@ -// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le -// +build !armbe,!mips,!mips64p32 +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 +// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32 package internal diff --git a/vendor/github.com/cilium/ebpf/internal/syscall.go b/vendor/github.com/cilium/ebpf/internal/syscall.go index b766e643e07..b75037bb9d6 100644 --- a/vendor/github.com/cilium/ebpf/internal/syscall.go +++ b/vendor/github.com/cilium/ebpf/internal/syscall.go @@ -1,6 +1,7 @@ package internal import ( + "errors" "fmt" "path/filepath" "runtime" @@ -68,6 +69,48 @@ func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { return r1, err } +type BPFProgLoadAttr struct { + ProgType uint32 + InsCount uint32 + Instructions Pointer + License Pointer + LogLevel uint32 + LogSize uint32 + LogBuf Pointer + KernelVersion uint32 // since 4.1 2541517c32be + ProgFlags uint32 // since 4.11 e07b98d9bffe + ProgName BPFObjName // since 4.15 067cae47771c + ProgIfIndex uint32 // since 4.15 1f6f4cb7ba21 + ExpectedAttachType uint32 // since 4.17 5e43f899b03a + ProgBTFFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBTFID uint32 + AttachProgFd uint32 +} + +// BPFProgLoad wraps BPF_PROG_LOAD. +func BPFProgLoad(attr *BPFProgLoadAttr) (*FD, error) { + for { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errors.Is(err, unix.EAGAIN) { + continue + } + + if err != nil { + return nil, err + } + + return NewFD(uint32(fd)), nil + } +} + type BPFProgAttachAttr struct { TargetFd uint32 AttachBpfFd uint32 @@ -180,6 +223,22 @@ func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error { return nil } +type bpfGetFDByIDAttr struct { + id uint32 + next uint32 +} + +// BPFObjGetInfoByFD wraps BPF_*_GET_FD_BY_ID. +// +// Available from 4.13. +func BPFObjGetFDByID(cmd BPFCmd, id uint32) (*FD, error) { + attr := bpfGetFDByIDAttr{ + id: id, + } + ptr, err := BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + return NewFD(uint32(ptr)), err +} + // BPFObjName is a null-terminated string made up of // 'A-Za-z0-9_' characters. 
type BPFObjName [unix.BPF_OBJ_NAME_LEN]byte diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 0a18eaf0cf8..9aa70fa78c2 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package unix @@ -20,10 +21,11 @@ const ( EPERM = linux.EPERM ESRCH = linux.ESRCH ENODEV = linux.ENODEV + EBADF = linux.EBADF + E2BIG = linux.E2BIG // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) - EBADF = linux.EBADF BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE BPF_F_RDONLY = linux.BPF_F_RDONLY @@ -35,6 +37,9 @@ const ( BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ SYS_BPF = linux.SYS_BPF F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD @@ -69,11 +74,6 @@ type Statfs_t = linux.Statfs_t // Rlimit is a wrapper type Rlimit = linux.Rlimit -// Setrlimit is a wrapper -func Setrlimit(resource int, rlim *Rlimit) (err error) { - return linux.Setrlimit(resource, rlim) -} - // Syscall is a wrapper func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { return linux.Syscall(trap, a1, a2, a3) @@ -202,3 +202,7 @@ func KernelRelease() (string, error) { release := string(uname.Release[:end]) return release, nil } + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index 1b06defc0a0..4f50d896ebb 100644 --- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package unix @@ -21,6 +22,7 @@ const ( ESRCH = syscall.ESRCH ENODEV = syscall.ENODEV EBADF = syscall.Errno(0) + E2BIG = syscall.Errno(0) // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP ENOTSUPP = syscall.Errno(0x20c) @@ -35,6 +37,9 @@ const ( BPF_F_INNER_MAP = 0 BPF_OBJ_NAME_LEN = 0x10 BPF_TAG_SIZE = 0x8 + BPF_RINGBUF_BUSY_BIT = 0 + BPF_RINGBUF_DISCARD_BIT = 0 + BPF_RINGBUF_HDR_SZ = 0 SYS_BPF = 321 F_DUPFD_CLOEXEC = 0x406 EPOLLIN = 0x1 @@ -86,11 +91,6 @@ type Rlimit struct { Max uint64 } -// Setrlimit is a wrapper -func Setrlimit(resource int, rlim *Rlimit) (err error) { - return errNonLinux -} - // Syscall is a wrapper func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { return 0, 0, syscall.Errno(1) @@ -261,3 +261,7 @@ func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags func KernelRelease() (string, error) { return "", errNonLinux } + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go index 1a678bfe657..4915e583767 100644 --- a/vendor/github.com/cilium/ebpf/internal/version.go +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,7 +2,7 @@ package internal import ( "fmt" - "io/ioutil" + "os" "regexp" "sync" @@ -109,7 +109,7 @@ func detectKernelVersion() (Version, error) { // Example format: Ubuntu 
4.15.0-91.92-generic 4.15.18 // This method exists in the kernel itself, see d18acd15c // ("perf tools: Fix kernel version error in ubuntu"). - if pvs, err := ioutil.ReadFile("/proc/version_signature"); err == nil { + if pvs, err := os.ReadFile("/proc/version_signature"); err == nil { // If /proc/version_signature exists, failing to parse it is an error. // It only exists on Ubuntu, where the real patch level is not obtainable // through any other method. diff --git a/vendor/github.com/cilium/ebpf/link/freplace.go b/vendor/github.com/cilium/ebpf/link/freplace.go new file mode 100644 index 00000000000..a698e1a9d30 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/freplace.go @@ -0,0 +1,88 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/btf" +) + +type FreplaceLink struct { + RawLink +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. +// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (*FreplaceLink, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + info, err := targetProg.Info() + if err != nil { + return nil, err + } + btfID, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput) + } + btfHandle, err := btf.NewHandleFromID(btfID) + if err != nil { + return nil, err + } + defer btfHandle.Close() + + var function *btf.Func + if err := btfHandle.Spec().FindType(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID = function.ID() + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if err != nil { + return nil, err + } + + return &FreplaceLink{*link}, nil +} + +// Update implements the Link interface. +func (f *FreplaceLink) Update(new *ebpf.Program) error { + return fmt.Errorf("freplace update: %w", ErrNotSupported) +} + +// LoadPinnedFreplace loads a pinned iterator from a bpffs. +func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (*FreplaceLink, error) { + link, err := LoadPinnedRawLink(fileName, TracingType, opts) + if err != nil { + return nil, err + } + + return &FreplaceLink{*link}, err +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go index ea71d6d608a..b6577b5a992 100644 --- a/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -5,7 +5,6 @@ import ( "crypto/rand" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -72,10 +71,11 @@ func (pt probeType) RetprobeBit() (uint64, error) { // given kernel symbol starts executing. See /proc/kallsyms for available // symbols. 
For example, printk(): // -// Kprobe("printk", prog) +// kp, err := Kprobe("printk", prog) // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. +// Losing the reference to the resulting Link (kp) will close the Kprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { k, err := kprobe(symbol, prog, false) if err != nil { @@ -95,10 +95,11 @@ func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { // before the given kernel symbol exits, with the function stack left intact. // See /proc/kallsyms for available symbols. For example, printk(): // -// Kretprobe("printk", prog) +// kp, err := Kretprobe("printk", prog) // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. +// Losing the reference to the resulting Link (kp) will close the Kretprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) { k, err := kprobe(symbol, prog, true) if err != nil { @@ -157,7 +158,7 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { // pmuKprobe opens a perf event based on the kprobe PMU. // Returns os.ErrNotExist if the given symbol does not exist in the kernel. func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { - return pmuProbe(kprobeType, symbol, "", 0, ret) + return pmuProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) } // pmuProbe opens a perf event based on a Performance Monitoring Unit. @@ -167,7 +168,7 @@ func pmuKprobe(symbol string, ret bool) (*perfEvent, error) { // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" // // Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU -func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) { +func pmuProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { // Getting the PMU type will fail if the kernel doesn't support // the perf_[k,u]probe PMU. et, err := getPMUEventType(typ) @@ -191,7 +192,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per switch typ { case kprobeType: // Create a pointer to a NUL-terminated string for the kernel. - sp, err := unsafeStringPtr(symbol) + sp, err = unsafeStringPtr(symbol) if err != nil { return nil, err } @@ -202,7 +203,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per Config: config, // Retprobe flag } case uprobeType: - sp, err := unsafeStringPtr(path) + sp, err = unsafeStringPtr(path) if err != nil { return nil, err } @@ -220,7 +221,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per } } - fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL // when trying to create a kretprobe for a missing symbol. 
Make sure ENOENT @@ -228,6 +229,11 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, os.ErrNotExist) } + // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned + // when attempting to set a uprobe on a trap instruction. + if errors.Is(err, unix.ENOTSUPP) { + return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", offset, err) + } if err != nil { return nil, fmt.Errorf("opening perf event: %w", err) } @@ -246,7 +252,7 @@ func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*per // tracefsKprobe creates a Kprobe tracefs entry. func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { - return tracefsProbe(kprobeType, symbol, "", 0, ret) + return tracefsProbe(kprobeType, symbol, "", 0, perfAllThreads, ret) } // tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. @@ -255,7 +261,7 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) { // Path and offset are only set in the case of uprobe(s) and are used to set // the executable/library path on the filesystem and the offset where the probe is inserted. // A perf event is then opened on the newly-created trace event and returned to the caller. -func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) { +func tracefsProbe(typ probeType, symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { // Generate a random string for each trace event we attempt to create. // This value is used as the 'group' token in tracefs to allow creating // multiple kprobe trace events with the same name. @@ -288,7 +294,7 @@ func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) ( } // Kprobes are ephemeral tracepoints and share the same perf event type. - fd, err := openTracepointPerfEvent(tid) + fd, err := openTracepointPerfEvent(tid, pid) if err != nil { return nil, err } @@ -413,7 +419,7 @@ func probePrefix(ret bool) string { func determineRetprobeBit(typ probeType) (uint64, error) { p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe") - data, err := ioutil.ReadFile(p) + data, err := os.ReadFile(p) if err != nil { return 0, err } diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go index 16cfff415dd..4926584696b 100644 --- a/vendor/github.com/cilium/ebpf/link/link.go +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -6,6 +6,7 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/btf" ) var ErrNotSupported = internal.ErrNotSupported @@ -29,8 +30,8 @@ type Link interface { // Close frees resources. // - // The link will be broken unless it has been pinned. A link - // may continue past the lifetime of the process if Close is + // The link will be broken unless it has been successfully pinned. + // A link may continue past the lifetime of the process if Close is // not called. Close() error @@ -49,6 +50,8 @@ type RawLinkOptions struct { Program *ebpf.Program // Attach must match the attach type of Program. Attach ebpf.AttachType + // BTF is the BTF of the attachment target. + BTF btf.TypeID } // RawLinkInfo contains metadata on a link. 
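
As a rough usage sketch of the revised Kprobe documentation and the clarified Link.Close contract above: the object file probe.o and the program name kprobe_printk are hypothetical, and ebpf.LoadCollection is assumed to be available as in other releases of the library; only link.Kprobe and Link.Close come from the changes themselves.

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func main() {
	// Load compiled programs and maps from an ELF object (hypothetical file).
	coll, err := ebpf.LoadCollection("probe.o")
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	// Attach to printk. Keep the returned Link reachable: per the updated
	// docs, losing the reference closes the kprobe and stops the program.
	kp, err := link.Kprobe("printk", coll.Programs["kprobe_printk"])
	if err != nil {
		log.Fatal(err)
	}
	// Close the Link explicitly on shutdown to release the perf event.
	defer kp.Close()

	// ... run until shutdown ...
}
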
@@ -83,9 +86,10 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { } attr := bpfLinkCreateAttr{ - targetFd: uint32(opts.Target), - progFd: uint32(progFd), - attachType: opts.Attach, + targetFd: uint32(opts.Target), + progFd: uint32(progFd), + attachType: opts.Attach, + targetBTFID: uint32(opts.BTF), } fd, err := bpfLinkCreate(&attr) if err != nil { diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go index 5267a47ec9b..7e0443a75cb 100644 --- a/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -236,7 +235,7 @@ func getPMUEventType(typ probeType) (uint64, error) { // openTracepointPerfEvent opens a tracepoint-type perf event. System-wide // [k,u]probes created by writing to /[k,u]probe_events are tracepoints // behind the scenes, and can be attached to using these perf events. -func openTracepointPerfEvent(tid uint64) (*internal.FD, error) { +func openTracepointPerfEvent(tid uint64, pid int) (*internal.FD, error) { attr := unix.PerfEventAttr{ Type: unix.PERF_TYPE_TRACEPOINT, Config: tid, @@ -245,7 +244,7 @@ func openTracepointPerfEvent(tid uint64) (*internal.FD, error) { Wakeup: 1, } - fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) if err != nil { return nil, fmt.Errorf("opening tracepoint perf event: %w", err) } @@ -263,7 +262,7 @@ func uint64FromFile(base string, path ...string) (uint64, error) { return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput) } - data, err := ioutil.ReadFile(p) + data, err := os.ReadFile(p) if err != nil { return 0, fmt.Errorf("reading file %s: %w", p, err) } diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go index 30e8a880507..a61499438b2 100644 --- a/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -88,10 +88,11 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace }) type bpfLinkCreateAttr struct { - progFd uint32 - targetFd uint32 - attachType ebpf.AttachType - flags uint32 + progFd uint32 + targetFd uint32 + attachType ebpf.AttachType + flags uint32 + targetBTFID uint32 } func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) { diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go index b8ae04bf0a0..7423df86b13 100644 --- a/vendor/github.com/cilium/ebpf/link/tracepoint.go +++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -11,7 +11,11 @@ import ( // tracepoints. The top-level directory is the group, the event's subdirectory // is the name. Example: // -// Tracepoint("syscalls", "sys_enter_fork", prog) +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog) +// +// Losing the reference to the resulting Link (tp) will close the Tracepoint +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. // // Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is // only possible as of kernel 4.14 (commit cf5f5ce). 
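
Looping back to the ProgramInfo hunks at the start of this section, a rough sketch of reading the new BTFID and MapIDs accessors; prog is assumed to be an already-loaded *ebpf.Program, and Program.Info is assumed to be the accessor that returns a ProgramInfo.

package probeinfo

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

// describe prints the metadata newly exposed by ProgramInfo in this bump.
func describe(prog *ebpf.Program) {
	info, err := prog.Info()
	if err != nil {
		log.Fatal(err)
	}

	// BTFID is populated on kernels >= 5.0, and only when the program was
	// loaded with BTF information.
	if id, ok := info.BTFID(); ok {
		fmt.Println("BTF object id:", id)
	}

	// MapIDs lists the maps referenced by the program, when the kernel
	// reports them.
	if ids, ok := info.MapIDs(); ok {
		fmt.Println("referenced maps:", ids)
	}
}
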
@@ -34,7 +38,7 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { return nil, err } - fd, err := openTracepointPerfEvent(tid) + fd, err := openTracepointPerfEvent(tid, perfAllThreads) if err != nil { return nil, err } diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go index 2bc395ee3cd..59170ce0468 100644 --- a/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -25,14 +25,18 @@ var ( value uint64 err error }{} + + // ErrNoSymbol indicates that the given symbol was not found + // in the ELF symbols table. + ErrNoSymbol = errors.New("not found") ) // Executable defines an executable program on the filesystem. type Executable struct { // Path of the executable on the filesystem. path string - // Parsed ELF symbols and dynamic symbols. - symbols map[string]elf.Symbol + // Parsed ELF symbols and dynamic symbols offsets. + offsets map[string]uint64 } // UprobeOptions defines additional parameters that will be used @@ -41,6 +45,9 @@ type UprobeOptions struct { // Symbol offset. Must be provided in case of external symbols (shared libs). // If set, overrides the offset eventually parsed from the executable. Offset uint64 + // Only set the uprobe on the given process ID. Useful when tracing + // shared library calls or programs that have many running instances. + PID int } // To open a new Executable, use: @@ -64,42 +71,84 @@ func OpenExecutable(path string) (*Executable, error) { return nil, fmt.Errorf("parse ELF file: %w", err) } - var ex = Executable{ - path: path, - symbols: make(map[string]elf.Symbol), + if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN { + // ELF is not an executable or a shared object. + return nil, errors.New("the given file is not an executable or a shared object") } - if err := ex.addSymbols(se.Symbols); err != nil { - return nil, err + + ex := Executable{ + path: path, + offsets: make(map[string]uint64), } - if err := ex.addSymbols(se.DynamicSymbols); err != nil { + if err := ex.load(se); err != nil { return nil, err } return &ex, nil } -func (ex *Executable) addSymbols(f func() ([]elf.Symbol, error)) error { - // elf.Symbols and elf.DynamicSymbols return ErrNoSymbols if the section is not found. - syms, err := f() +func (ex *Executable) load(f *internal.SafeELFFile) error { + syms, err := f.Symbols() if err != nil && !errors.Is(err, elf.ErrNoSymbols) { return err } + + dynsyms, err := f.DynamicSymbols() + if err != nil && !errors.Is(err, elf.ErrNoSymbols) { + return err + } + + syms = append(syms, dynsyms...) + for _, s := range syms { if elf.ST_TYPE(s.Info) != elf.STT_FUNC { // Symbol not associated with a function or other executable code. continue } - ex.symbols[s.Name] = s + + off := s.Value + + // Loop over ELF segments. + for _, prog := range f.Progs { + // Skip uninteresting segments. + if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 { + continue + } + + if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) { + // If the symbol value is contained in the segment, calculate + // the symbol offset. 
+ // + // fn symbol offset = fn symbol VA - .text VA + .text offset + // + // stackoverflow.com/a/40249502 + off = s.Value - prog.Vaddr + prog.Off + break + } + } + + ex.offsets[s.Name] = off } + return nil } -func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) { - if s, ok := ex.symbols[symbol]; ok { - return &s, nil +func (ex *Executable) offset(symbol string) (uint64, error) { + if off, ok := ex.offsets[symbol]; ok { + // Symbols with location 0 from section undef are shared library calls and + // are relocated before the binary is executed. Dynamic linking is not + // implemented by the library, so mark this as unsupported for now. + // + // Since only offset values are stored and not elf.Symbol, if the value is 0, + // assume it's an external symbol. + if off == 0 { + return 0, fmt.Errorf("cannot resolve %s library call '%s', "+ + "consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported) + } + return off, nil } - return nil, fmt.Errorf("symbol %s not found", symbol) + return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) } // Uprobe attaches the given eBPF program to a perf event that fires when the @@ -112,11 +161,14 @@ func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) { // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. Functions provided by shared libraries can currently not -// be traced and will result in an ErrNotSupported. +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { u, err := ex.uprobe(symbol, prog, opts, false) if err != nil { @@ -141,11 +193,14 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. // -// The resulting Link must be Closed during program shutdown to avoid leaking -// system resources. Functions provided by shared libraries can currently not -// be traced and will result in an ErrNotSupported. +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. 
func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { u, err := ex.uprobe(symbol, prog, opts, true) if err != nil { @@ -175,24 +230,20 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti if opts != nil && opts.Offset != 0 { offset = opts.Offset } else { - sym, err := ex.symbol(symbol) + off, err := ex.offset(symbol) if err != nil { - return nil, fmt.Errorf("symbol '%s' not found: %w", symbol, err) - } - - // Symbols with location 0 from section undef are shared library calls and - // are relocated before the binary is executed. Dynamic linking is not - // implemented by the library, so mark this as unsupported for now. - if sym.Section == elf.SHN_UNDEF && sym.Value == 0 { - return nil, fmt.Errorf("cannot resolve %s library call '%s', "+ - "consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported) + return nil, err } + offset = off + } - offset = sym.Value + pid := perfAllThreads + if opts != nil && opts.PID != 0 { + pid = opts.PID } // Use uprobe PMU if the kernel has it available. - tp, err := pmuUprobe(symbol, ex.path, offset, ret) + tp, err := pmuUprobe(symbol, ex.path, offset, pid, ret) if err == nil { return tp, nil } @@ -201,7 +252,7 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // Use tracefs if uprobe PMU is missing. - tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, ret) + tp, err = tracefsUprobe(uprobeSanitizedSymbol(symbol), ex.path, offset, pid, ret) if err != nil { return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) } @@ -210,13 +261,13 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } // pmuUprobe opens a perf event based on the uprobe PMU. -func pmuUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - return pmuProbe(uprobeType, symbol, path, offset, ret) +func pmuUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { + return pmuProbe(uprobeType, symbol, path, offset, pid, ret) } // tracefsUprobe creates a Uprobe tracefs entry. -func tracefsUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) { - return tracefsProbe(uprobeType, symbol, path, offset, ret) +func tracefsUprobe(symbol, path string, offset uint64, pid int, ret bool) (*perfEvent, error) { + return tracefsProbe(uprobeType, symbol, path, offset, pid, ret) } // uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore. diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go index 6c2efef9e42..f3b1629e70a 100644 --- a/vendor/github.com/cilium/ebpf/linker.go +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal/btf" ) // link resolves bpf-to-bpf calls. 
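The uprobe changes above add UprobeOptions.PID, which scopes the underlying perf event to a single process instead of perfAllThreads, and rework symbol resolution so offsets are computed from the ELF load segments. A minimal sketch of the intended use, assuming prog is an already-loaded kprobe/uprobe-type *ebpf.Program; the libc path, the "malloc" symbol, and the package name are placeholders, not values taken from the vendored code:

package tracing

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// traceMallocInPID attaches a uprobe on libc's malloc that fires only for
// the given process ID rather than system-wide.
func traceMallocInPID(prog *ebpf.Program, pid int) (link.Link, error) {
	ex, err := link.OpenExecutable("/usr/lib/x86_64-linux-gnu/libc.so.6")
	if err != nil {
		return nil, err
	}
	// PID restricts the perf event to one process; Offset could also be set
	// here when the symbol cannot be resolved from the ELF symbol tables.
	up, err := ex.Uprobe("malloc", prog, &link.UprobeOptions{PID: pid})
	if err != nil {
		return nil, err
	}
	return up, nil
}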
@@ -40,7 +39,7 @@ func link(prog *ProgramSpec, libs []*ProgramSpec) error { pending = append(pending, lib.Instructions) if prog.BTF != nil && lib.BTF != nil { - if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil { + if err := prog.BTF.Append(lib.BTF); err != nil { return fmt.Errorf("linking BTF of %s: %w", lib.Name, err) } } @@ -136,5 +135,25 @@ func fixupJumpsAndCalls(insns asm.Instructions) error { } } + // fixupBPFCalls replaces bpf_probe_read_{kernel,user}[_str] with bpf_probe_read[_str] on older kernels + // https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L6009 + iter = insns.Iterate() + for iter.Next() { + ins := iter.Ins + if !ins.IsBuiltinCall() { + continue + } + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + if err := haveProbeReadKernel(); err != nil { + ins.Constant = int64(asm.FnProbeRead) + } + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + if err := haveProbeReadKernel(); err != nil { + ins.Constant = int64(asm.FnProbeReadStr) + } + } + } + return nil } diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go index f257d88c03e..cca387ead01 100644 --- a/vendor/github.com/cilium/ebpf/map.go +++ b/vendor/github.com/cilium/ebpf/map.go @@ -1,6 +1,7 @@ package ebpf import ( + "bytes" "errors" "fmt" "io" @@ -65,6 +66,11 @@ type MapSpec struct { // InnerMap is used as a template for ArrayOfMaps and HashOfMaps InnerMap *MapSpec + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. Must be empty before instantiating + // the MapSpec into a Map. + Extra bytes.Reader + // The BTF associated with this map. BTF *btf.Map } @@ -82,9 +88,12 @@ func (ms *MapSpec) Copy() *MapSpec { } cpy := *ms + cpy.Contents = make([]MapKV, len(ms.Contents)) copy(cpy.Contents, ms.Contents) + cpy.InnerMap = ms.InnerMap.Copy() + return &cpy } @@ -188,14 +197,24 @@ func NewMap(spec *MapSpec) (*Map, error) { // // The caller is responsible for ensuring the process' rlimit is set // sufficiently high for locking memory during map creation. This can be done -// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMapWithOptions. +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. // // May return an error wrapping ErrMapIncompatible. 
func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { handles := newHandleCache() defer handles.close() - return newMapWithOptions(spec, opts, handles) + m, err := newMapWithOptions(spec, opts, handles) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + err = m.finalize(spec) + if err != nil { + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil } func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) { @@ -207,8 +226,12 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ switch spec.Pinning { case PinByName: - if spec.Name == "" || opts.PinPath == "" { - return nil, fmt.Errorf("pin by name: missing Name or PinPath") + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") } path := filepath.Join(opts.PinPath, spec.Name) @@ -244,16 +267,19 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return nil, errors.New("inner maps cannot be pinned") } - template, err := createMap(spec.InnerMap, nil, opts, handles) + template, err := spec.InnerMap.createMap(nil, opts, handles) if err != nil { - return nil, err + return nil, fmt.Errorf("inner map: %w", err) } defer template.Close() + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. + innerFd = template.fd } - m, err := createMap(spec, innerFd, opts, handles) + m, err := spec.createMap(innerFd, opts, handles) if err != nil { return nil, err } @@ -269,7 +295,9 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return m, nil } -func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { closeOnError := func(closer io.Closer) { if err != nil { closer.Close() @@ -278,10 +306,16 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand spec = spec.Copy() + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. 
+ if _, err := io.Copy(internal.DiscardZeroes{}, &spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } + switch spec.Type { - case ArrayOfMaps: - fallthrough - case HashOfMaps: + case ArrayOfMaps, HashOfMaps: if err := haveNestedMaps(); err != nil { return nil, err } @@ -350,7 +384,7 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand var btfDisabled bool if spec.BTF != nil { - handle, err := handles.btfHandle(btf.MapSpec(spec.BTF)) + handle, err := handles.btfHandle(spec.BTF.Spec) btfDisabled = errors.Is(err, btf.ErrNotSupported) if err != nil && !btfDisabled { return nil, fmt.Errorf("load BTF: %w", err) @@ -358,15 +392,15 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand if handle != nil { attr.BTFFd = uint32(handle.FD()) - attr.BTFKeyTypeID = uint32(btf.MapKey(spec.BTF).ID()) - attr.BTFValueTypeID = uint32(btf.MapValue(spec.BTF).ID()) + attr.BTFKeyTypeID = uint32(spec.BTF.Key.ID()) + attr.BTFValueTypeID = uint32(spec.BTF.Value.ID()) } } fd, err := internal.BPFMapCreate(&attr) if err != nil { if errors.Is(err, unix.EPERM) { - return nil, fmt.Errorf("map create: RLIMIT_MEMLOCK may be too low: %w", err) + return nil, fmt.Errorf("map create: %w (MEMLOCK bay be too low, consider rlimit.RemoveMemlock)", err) } if btfDisabled { return nil, fmt.Errorf("map create without BTF: %w", err) @@ -380,19 +414,11 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *hand return nil, fmt.Errorf("map create: %w", err) } - if err := m.populate(spec.Contents); err != nil { - return nil, fmt.Errorf("map create: can't set initial contents: %w", err) - } - - if spec.Freeze { - if err := m.Freeze(); err != nil { - return nil, fmt.Errorf("can't freeze map: %w", err) - } - } - return m, nil } +// newMap allocates and returns a new Map structure. +// Sets the fullValueSize on per-CPU maps. func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { m := &Map{ name, @@ -415,7 +441,7 @@ func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEn return nil, err } - m.fullValueSize = align(int(valueSize), 8) * possibleCPUs + m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs return m, nil } @@ -892,12 +918,21 @@ func (m *Map) Freeze() error { return nil } -func (m *Map) populate(contents []MapKV) error { - for _, kv := range contents { +// finalize populates the Map according to the Contents specified +// in spec and freezes the Map if requested by spec. +func (m *Map) finalize(spec *MapSpec) error { + for _, kv := range spec.Contents { if err := m.Put(kv.Key, kv.Value); err != nil { - return fmt.Errorf("key %v: %w", kv.Key, err) + return fmt.Errorf("putting value: key %v: %w", kv.Key, err) } } + + if spec.Freeze { + if err := m.Freeze(); err != nil { + return fmt.Errorf("freezing map: %w", err) + } + } + return nil } @@ -1212,7 +1247,7 @@ func MapGetNextID(startID MapID) (MapID, error) { // // Returns ErrNotExist, if there is no eBPF map with the given id. 
func NewMapFromID(id MapID) (*Map, error) { - fd, err := bpfObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id)) + fd, err := internal.BPFObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id)) if err != nil { return nil, err } diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go index f2610eff9c7..e461d673d70 100644 --- a/vendor/github.com/cilium/ebpf/marshalers.go +++ b/vendor/github.com/cilium/ebpf/marshalers.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" "runtime" + "sync" "unsafe" "github.com/cilium/ebpf/internal" @@ -39,6 +40,10 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) { // Returns an error if the given value isn't representable in exactly // length bytes. func marshalBytes(data interface{}, length int) (buf []byte, err error) { + if data == nil { + return nil, errors.New("can't marshal a nil value") + } + switch value := data.(type) { case encoding.BinaryMarshaler: buf, err = value.MarshalBinary() @@ -77,22 +82,30 @@ func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) { return internal.NewSlicePointer(buf), buf } +var bytesReaderPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Reader) + }, +} + // unmarshalBytes converts a byte buffer into an arbitrary value. // // Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since // those have special cases that allow more types to be encoded. +// +// The common int32 and int64 types are directly handled to avoid +// unnecessary heap allocations as happening in the default case. func unmarshalBytes(data interface{}, buf []byte) error { switch value := data.(type) { case unsafe.Pointer: - // This could be solved in Go 1.17 by unsafe.Slice instead. (https://github.com/golang/go/issues/19367) - // We could opt for removing unsafe.Pointer support in the lib as well. 
- sh := &reflect.SliceHeader{ //nolint:govet - Data: uintptr(value), - Len: len(buf), - Cap: len(buf), - } + var dst []byte + // Use unsafe.Slice when we drop support for pre1.17 (https://github.com/golang/go/issues/19367) + // We could opt for removing unsafe.Pointer support in the lib as well + sh := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) + sh.Data = uintptr(value) + sh.Len = len(buf) + sh.Cap = len(buf) - dst := *(*[]byte)(unsafe.Pointer(sh)) copy(dst, buf) runtime.KeepAlive(value) return nil @@ -106,12 +119,38 @@ func unmarshalBytes(data interface{}, buf []byte) error { case *[]byte: *value = buf return nil + case *int32: + if len(buf) < 4 { + return errors.New("int32 requires 4 bytes") + } + *value = int32(internal.NativeEndian.Uint32(buf)) + return nil + case *uint32: + if len(buf) < 4 { + return errors.New("uint32 requires 4 bytes") + } + *value = internal.NativeEndian.Uint32(buf) + return nil + case *int64: + if len(buf) < 8 { + return errors.New("int64 requires 8 bytes") + } + *value = int64(internal.NativeEndian.Uint64(buf)) + return nil + case *uint64: + if len(buf) < 8 { + return errors.New("uint64 requires 8 bytes") + } + *value = internal.NativeEndian.Uint64(buf) + return nil case string: return errors.New("require pointer to string") case []byte: return errors.New("require pointer to []byte") default: - rd := bytes.NewReader(buf) + rd := bytesReaderPool.Get().(*bytes.Reader) + rd.Reset(buf) + defer bytesReaderPool.Put(rd) if err := binary.Read(rd, internal.NativeEndian, value); err != nil { return fmt.Errorf("decoding %T: %v", value, err) } @@ -142,7 +181,7 @@ func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, er return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") } - alignedElemLength := align(elemLength, 8) + alignedElemLength := internal.Align(elemLength, 8) buf := make([]byte, alignedElemLength*possibleCPUs) for i := 0; i < sliceLen; i++ { @@ -212,7 +251,3 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro reflect.ValueOf(slicePtr).Elem().Set(slice) return nil } - -func align(n, alignment int) int { - return (int(n) + alignment - 1) / alignment * alignment -} diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go index 13bdb6ddad1..3549a3fe3f0 100644 --- a/vendor/github.com/cilium/ebpf/prog.go +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -57,16 +57,21 @@ type ProgramSpec struct { // Name is passed to the kernel as a debug aid. Must only contain // alpha numeric and '_' characters. Name string + // Type determines at which hook in the kernel a program will run. Type ProgramType AttachType AttachType - // Name of a kernel data structure to attach to. It's interpretation - // depends on Type and AttachType. - AttachTo string + // Name of a kernel data structure or function to attach to. Its + // interpretation depends on Type and AttachType. + AttachTo string + // The program to attach to. Must be provided manually. + AttachTarget *Program Instructions asm.Instructions + // Flags is passed to the kernel and specifies additional program // load attributes. Flags uint32 + // License of the program. Some helpers are only available if // the license is deemed compatible with the GPL. 
// @@ -146,7 +151,7 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) { if len(spec.Instructions) == 0 { - return nil, errors.New("Instructions cannot be empty") + return nil, errors.New("instructions cannot be empty") } if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { @@ -166,16 +171,16 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand kv = v.Kernel() } - attr := &bpfProgLoadAttr{ - progType: spec.Type, - progFlags: spec.Flags, - expectedAttachType: spec.AttachType, - license: internal.NewStringPointer(spec.License), - kernelVersion: kv, + attr := &internal.BPFProgLoadAttr{ + ProgType: uint32(spec.Type), + ProgFlags: spec.Flags, + ExpectedAttachType: uint32(spec.AttachType), + License: internal.NewStringPointer(spec.License), + KernelVersion: kv, } if haveObjName() == nil { - attr.progName = internal.NewBPFObjName(spec.Name) + attr.ProgName = internal.NewBPFObjName(spec.Name) } var err error @@ -190,35 +195,35 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand var btfDisabled bool var core btf.COREFixups if spec.BTF != nil { - core, err = btf.ProgramFixups(spec.BTF, targetBTF) + core, err = spec.BTF.Fixups(targetBTF) if err != nil { return nil, fmt.Errorf("CO-RE relocations: %w", err) } - handle, err := handles.btfHandle(btf.ProgramSpec(spec.BTF)) + handle, err := handles.btfHandle(spec.BTF.Spec()) btfDisabled = errors.Is(err, btf.ErrNotSupported) if err != nil && !btfDisabled { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { - attr.progBTFFd = uint32(handle.FD()) + attr.ProgBTFFd = uint32(handle.FD()) - recSize, bytes, err := btf.ProgramLineInfos(spec.BTF) + recSize, bytes, err := spec.BTF.LineInfos() if err != nil { return nil, fmt.Errorf("get BTF line infos: %w", err) } - attr.lineInfoRecSize = recSize - attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.lineInfo = internal.NewSlicePointer(bytes) + attr.LineInfoRecSize = recSize + attr.LineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) + attr.LineInfo = internal.NewSlicePointer(bytes) - recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF) + recSize, bytes, err = spec.BTF.FuncInfos() if err != nil { return nil, fmt.Errorf("get BTF function infos: %w", err) } - attr.funcInfoRecSize = recSize - attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) - attr.funcInfo = internal.NewSlicePointer(bytes) + attr.FuncInfoRecSize = recSize + attr.FuncInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize)) + attr.FuncInfo = internal.NewSlicePointer(bytes) } } @@ -238,16 +243,41 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand } bytecode := buf.Bytes() - attr.instructions = internal.NewSlicePointer(bytecode) - attr.insCount = uint32(len(bytecode) / asm.InstructionSize) + attr.Instructions = internal.NewSlicePointer(bytecode) + attr.InsCount = uint32(len(bytecode) / asm.InstructionSize) if spec.AttachTo != "" { + if spec.AttachTarget != nil { + info, err := spec.AttachTarget.Info() + if err != nil { + return nil, fmt.Errorf("load target BTF: %w", err) + } + + btfID, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("load target BTF: no BTF info available") + } + btfHandle, err := btf.NewHandleFromID(btfID) + if err != nil { + return nil, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + targetBTF = 
btfHandle.Spec() + if err != nil { + return nil, fmt.Errorf("load target BTF: %w", err) + } + } + target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType) if err != nil { return nil, err } if target != nil { - attr.attachBTFID = target.ID() + attr.AttachBTFID = uint32(target.ID()) + } + if spec.AttachTarget != nil { + attr.AttachProgFd = uint32(spec.AttachTarget.FD()) } } @@ -259,12 +289,12 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand var logBuf []byte if opts.LogLevel > 0 { logBuf = make([]byte, logSize) - attr.logLevel = opts.LogLevel - attr.logSize = uint32(len(logBuf)) - attr.logBuf = internal.NewSlicePointer(logBuf) + attr.LogLevel = opts.LogLevel + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = internal.NewSlicePointer(logBuf) } - fd, err := bpfProgLoad(attr) + fd, err := internal.BPFProgLoad(attr) if err == nil { return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil } @@ -273,17 +303,20 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand if opts.LogLevel == 0 && opts.LogSize >= 0 { // Re-run with the verifier enabled to get better error messages. logBuf = make([]byte, logSize) - attr.logLevel = 1 - attr.logSize = uint32(len(logBuf)) - attr.logBuf = internal.NewSlicePointer(logBuf) + attr.LogLevel = 1 + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = internal.NewSlicePointer(logBuf) - _, logErr = bpfProgLoad(attr) + fd, logErr = internal.BPFProgLoad(attr) + if logErr == nil { + fd.Close() + } } if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 { // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can // check that the log is empty to reduce false positives. - return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr) + return nil, fmt.Errorf("load program: %w (MEMLOCK bay be too low, consider rlimit.RemoveMemlock)", logErr) } err = internal.ErrorWithLog(err, logBuf, logErr) @@ -310,7 +343,7 @@ func NewProgramFromFD(fd int) (*Program, error) { // // Returns ErrNotExist, if there is no eBPF program with the given id. func NewProgramFromID(id ProgramID) (*Program, error) { - fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) + fd, err := internal.BPFObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id)) if err != nil { return nil, fmt.Errorf("get program by id: %w", err) } @@ -677,45 +710,44 @@ func ProgramGetNextID(startID ProgramID) (ProgramID, error) { // // Deprecated: use ProgramInfo.ID() instead. 
func (p *Program) ID() (ProgramID, error) { - info, err := bpfGetProgInfoByFD(p.fd) + info, err := bpfGetProgInfoByFD(p.fd, nil) if err != nil { return ProgramID(0), err } return ProgramID(info.id), nil } -func resolveBTFType(kernel *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { +func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { type match struct { p ProgramType a AttachType } - var target btf.Type var typeName, featureName string switch (match{progType, attachType}) { case match{LSM, AttachLSMMac}: - target = new(btf.Func) typeName = "bpf_lsm_" + name featureName = name + " LSM hook" - case match{Tracing, AttachTraceIter}: - target = new(btf.Func) typeName = "bpf_iter_" + name featureName = name + " iterator" - + case match{Extension, AttachNone}: + typeName = name + featureName = fmt.Sprintf("freplace %s", name) default: return nil, nil } - if kernel == nil { + if spec == nil { var err error - kernel, err = btf.LoadKernelSpec() + spec, err = btf.LoadKernelSpec() if err != nil { return nil, fmt.Errorf("load kernel spec: %w", err) } } - err := kernel.FindType(typeName, target) + var target *btf.Func + err := spec.FindType(typeName, &target) if errors.Is(err, btf.ErrNotFound) { return nil, &internal.UnsupportedFeatureError{ Name: featureName, @@ -724,5 +756,6 @@ func resolveBTFType(kernel *btf.Spec, name string, progType ProgramType, attachT if err != nil { return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) } + return target, nil } diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh index e2437beed2a..a079edc7e18 100644 --- a/vendor/github.com/cilium/ebpf/run-tests.sh +++ b/vendor/github.com/cilium/ebpf/run-tests.sh @@ -5,7 +5,7 @@ # Run all tests on a 5.4 kernel # $ ./run-tests.sh 5.4 # Run a subset of tests: -# $ ./run-tests.sh 5.4 go test ./link +# $ ./run-tests.sh 5.4 ./link set -euo pipefail @@ -48,15 +48,17 @@ if [[ "${1:-}" = "--exec-vm" ]]; then rm "${output}/fake-stdin" fi - $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ - --rwdir="${testdir}=${testdir}" \ - --rodir=/run/input="${input}" \ - --rwdir=/run/output="${output}" \ - --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ - --qemu-opts -smp 2 # need at least two CPUs for some tests + if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ + --rwdir="${testdir}=${testdir}" \ + --rodir=/run/input="${input}" \ + --rwdir=/run/output="${output}" \ + --script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \ + --kopt possible_cpus=2; then # need at least two CPUs for some tests + exit 23 + fi if [[ ! -e "${output}/success" ]]; then - exit 1 + exit 42 fi $sudo rm -r "$output" @@ -74,7 +76,7 @@ elif [[ "${1:-}" = "--exec-test" ]]; then dmesg -C if ! "$@"; then dmesg - exit 1 + exit 1 # this return code is "swallowed" by qemu fi touch "/run/output/success" exit 0 @@ -108,7 +110,7 @@ else echo "No selftests found, disabling" fi -args=(-v -short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) +args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) 
if (( $# > 0 )); then args=("$@") fi diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go index f5a38549bb7..f8cb5f0e0cd 100644 --- a/vendor/github.com/cilium/ebpf/syscalls.go +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ -1,13 +1,14 @@ package ebpf import ( + "bytes" "errors" "fmt" "os" "unsafe" + "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" "github.com/cilium/ebpf/internal/unix" ) @@ -73,30 +74,6 @@ type bpfMapInfo struct { btf_value_type_id uint32 } -type bpfProgLoadAttr struct { - progType ProgramType - insCount uint32 - instructions internal.Pointer - license internal.Pointer - logLevel uint32 - logSize uint32 - logBuf internal.Pointer - kernelVersion uint32 // since 4.1 2541517c32be - progFlags uint32 // since 4.11 e07b98d9bffe - progName internal.BPFObjName // since 4.15 067cae47771c - progIfIndex uint32 // since 4.15 1f6f4cb7ba21 - expectedAttachType AttachType // since 4.17 5e43f899b03a - progBTFFd uint32 - funcInfoRecSize uint32 - funcInfo internal.Pointer - funcInfoCnt uint32 - lineInfoRecSize uint32 - lineInfo internal.Pointer - lineInfoCnt uint32 - attachBTFID btf.TypeID - attachProgFd uint32 -} - type bpfProgInfo struct { prog_type uint32 id uint32 @@ -107,7 +84,7 @@ type bpfProgInfo struct { xlated_prog_insns internal.Pointer load_time uint64 // since 4.15 cb4d2b3f03d8 created_by_uid uint32 - nr_map_ids uint32 + nr_map_ids uint32 // since 4.15 cb4d2b3f03d8 map_ids internal.Pointer name internal.BPFObjName // since 4.15 067cae47771c ifindex uint32 @@ -145,11 +122,6 @@ type bpfProgTestRunAttr struct { duration uint32 } -type bpfGetFDByIDAttr struct { - id uint32 - next uint32 -} - type bpfMapFreezeAttr struct { mapFd uint32 } @@ -160,23 +132,6 @@ type bpfObjGetNextIDAttr struct { openFlags uint32 } -func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) { - for { - fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. 
- if errors.Is(err, unix.EAGAIN) { - continue - } - - if err != nil { - return nil, err - } - - return internal.NewFD(uint32(fd)), nil - } -} - func bpfProgTestRun(attr *bpfProgTestRunAttr) error { _, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) return err @@ -372,6 +327,10 @@ func wrapMapError(err error) error { return internal.SyscallError(ErrNotSupported, unix.ENOTSUPP) } + if errors.Is(err, unix.E2BIG) { + return fmt.Errorf("key too big for map: %w", err) + } + return err } @@ -388,8 +347,13 @@ func bpfMapFreeze(m *internal.FD) error { return err } -func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) { +func bpfGetProgInfoByFD(fd *internal.FD, ids []MapID) (*bpfProgInfo, error) { var info bpfProgInfo + if len(ids) > 0 { + info.nr_map_ids = uint32(len(ids)) + info.map_ids = internal.NewPointer(unsafe.Pointer(&ids[0])) + } + if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil { return nil, fmt.Errorf("can't get program info: %w", err) } @@ -471,10 +435,30 @@ var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { return nil }) -func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) { - attr := bpfGetFDByIDAttr{ - id: id, +var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error { + insns := asm.Instructions{ + asm.Mov.Reg(asm.R1, asm.R10), + asm.Add.Imm(asm.R1, -8), + asm.Mov.Imm(asm.R2, 8), + asm.Mov.Imm(asm.R3, 0), + asm.FnProbeReadKernel.Call(), + asm.Return(), + } + buf := bytes.NewBuffer(make([]byte, 0, len(insns)*asm.InstructionSize)) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return err } - ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) - return internal.NewFD(uint32(ptr)), err -} + bytecode := buf.Bytes() + + fd, err := internal.BPFProgLoad(&internal.BPFProgLoadAttr{ + ProgType: uint32(Kprobe), + License: internal.NewStringPointer("GPL"), + Instructions: internal.NewSlicePointer(bytecode), + InsCount: uint32(len(bytecode) / asm.InstructionSize), + }) + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}) diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go index 441a82fe4c1..84b83f9f985 100644 --- a/vendor/github.com/cilium/ebpf/types.go +++ b/vendor/github.com/cilium/ebpf/types.go @@ -4,12 +4,17 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType +//go:generate stringer -output types_string.go -type=MapType,ProgramType,PinType // MapType indicates the type map structure // that will be initialized in the kernel. type MapType uint32 +// Max returns the latest supported MapType. +func (_ MapType) Max() MapType { + return maxMapType - 1 +} + // All the various map types that can be created const ( UnspecifiedMap MapType = iota @@ -85,15 +90,28 @@ const ( SkStorage // DevMapHash - Hash-based indexing scheme for references to network devices. DevMapHash - StructOpts + // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF + // program. + StructOpsMap + // RingBuf - Similar to PerfEventArray, but shared across all CPUs. RingBuf + // InodeStorage - Specialized local storage map for inodes. InodeStorage + // TaskStorage - Specialized local storage map for task_struct. TaskStorage + // maxMapType - Bound enum of MapTypes, has to be last in enum. 
+ maxMapType ) +// Deprecated: StructOpts was a typo, use StructOpsMap instead. +// +// Declared as a variable to prevent stringer from picking it up +// as an enum value. +var StructOpts MapType = StructOpsMap + // hasPerCPUValue returns true if the Map stores a value per CPU. func (mt MapType) hasPerCPUValue() bool { - return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash + return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage } // canStoreMap returns true if the map type accepts a map fd @@ -111,6 +129,11 @@ func (mt MapType) canStoreProgram() bool { // ProgramType of the eBPF program type ProgramType uint32 +// Max return the latest supported ProgramType. +func (_ ProgramType) Max() ProgramType { + return maxProgramType - 1 +} + // eBPF program types const ( UnspecifiedProgram ProgramType = iota @@ -144,6 +167,7 @@ const ( Extension LSM SkLookup + maxProgramType ) // AttachType of the eBPF program, needed to differentiate allowed context accesses in @@ -151,6 +175,8 @@ const ( // Will cause invalid argument (EINVAL) at program load time if set incorrectly. type AttachType uint32 +//go:generate stringer -type AttachType -trimprefix Attach + // AttachNone is an alias for AttachCGroupInetIngress for readability reasons. const AttachNone AttachType = 0 @@ -193,6 +219,10 @@ const ( AttachXDPCPUMap AttachSkLookup AttachXDP + AttachSkSKBVerdict + AttachSkReuseportSelect + AttachSkReuseportSelectOrMigrate + AttachPerfEvent ) // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go index c25f7656479..81cbc9efde0 100644 --- a/vendor/github.com/cilium/ebpf/types_string.go +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT. +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. 
package ebpf @@ -34,15 +34,16 @@ func _() { _ = x[Stack-23] _ = x[SkStorage-24] _ = x[DevMapHash-25] - _ = x[StructOpts-26] + _ = x[StructOpsMap-26] _ = x[RingBuf-27] _ = x[InodeStorage-28] _ = x[TaskStorage-29] + _ = x[maxMapType-30] } -const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOptsRingBufInodeStorageTaskStorage" +const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType" -var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 258, 265, 277, 288} +var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300} func (i MapType) String() string { if i >= MapType(len(_MapType_index)-1) { @@ -85,11 +86,12 @@ func _() { _ = x[Extension-28] _ = x[LSM-29] _ = x[SkLookup-30] + _ = x[maxProgramType-31] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup" +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupmaxProgramType" -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294} +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 308} func (i ProgramType) String() string { if i >= ProgramType(len(_ProgramType_index)-1) { @@ -97,61 +99,6 @@ func (i ProgramType) String() string { } return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] } -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[AttachNone-0] - _ = x[AttachCGroupInetIngress-0] - _ = x[AttachCGroupInetEgress-1] - _ = x[AttachCGroupInetSockCreate-2] - _ = x[AttachCGroupSockOps-3] - _ = x[AttachSkSKBStreamParser-4] - _ = x[AttachSkSKBStreamVerdict-5] - _ = x[AttachCGroupDevice-6] - _ = x[AttachSkMsgVerdict-7] - _ = x[AttachCGroupInet4Bind-8] - _ = x[AttachCGroupInet6Bind-9] - _ = x[AttachCGroupInet4Connect-10] - _ = x[AttachCGroupInet6Connect-11] - _ = x[AttachCGroupInet4PostBind-12] - _ = x[AttachCGroupInet6PostBind-13] - _ = x[AttachCGroupUDP4Sendmsg-14] - _ = x[AttachCGroupUDP6Sendmsg-15] - _ = x[AttachLircMode2-16] - _ = x[AttachFlowDissector-17] - _ = x[AttachCGroupSysctl-18] - _ = x[AttachCGroupUDP4Recvmsg-19] - _ = x[AttachCGroupUDP6Recvmsg-20] - _ = x[AttachCGroupGetsockopt-21] - _ = x[AttachCGroupSetsockopt-22] - _ = x[AttachTraceRawTp-23] - _ = x[AttachTraceFEntry-24] - _ = x[AttachTraceFExit-25] - _ = x[AttachModifyReturn-26] - _ = x[AttachLSMMac-27] - _ = x[AttachTraceIter-28] - _ = x[AttachCgroupInet4GetPeername-29] - _ = x[AttachCgroupInet6GetPeername-30] - _ = x[AttachCgroupInet4GetSockname-31] - _ = x[AttachCgroupInet6GetSockname-32] - _ = x[AttachXDPDevMap-33] - _ = x[AttachCgroupInetSockRelease-34] - _ = x[AttachXDPCPUMap-35] - _ = x[AttachSkLookup-36] - _ = x[AttachXDP-37] -} - -const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP" - -var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774} - -func (i AttachType) String() string { - if i >= AttachType(len(_AttachType_index)-1) { - return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] -} func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
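Both reworded EPERM error messages in this bump ("consider rlimit.RemoveMemlock") steer callers toward the library's rlimit helper instead of raising RLIMIT_MEMLOCK by hand. A minimal sketch of that setup, assuming map and program creation happen elsewhere in the same process; the package and function names are illustrative:

package tracing

import (
	"fmt"

	"github.com/cilium/ebpf/rlimit"
)

// setup lifts the locked-memory limit before any maps or programs are
// created. Kernels older than 5.11 account BPF allocations against
// RLIMIT_MEMLOCK, so creation can otherwise fail with EPERM.
func setup() error {
	if err := rlimit.RemoveMemlock(); err != nil {
		return fmt.Errorf("removing memlock limit: %w", err)
	}
	return nil
}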
diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go index 129a92883dd..787c11fe56f 100644 --- a/vendor/github.com/containerd/console/console_windows.go +++ b/vendor/github.com/containerd/console/console_windows.go @@ -17,10 +17,10 @@ package console import ( + "errors" "fmt" "os" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -103,7 +103,7 @@ func (m *master) Reset() error { {m.err, m.errMode}, } { if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { - return errors.Wrap(err, "unable to restore console mode") + return fmt.Errorf("unable to restore console mode: %w", err) } } @@ -114,7 +114,7 @@ func (m *master) Size() (WinSize, error) { var info windows.ConsoleScreenBufferInfo err := windows.GetConsoleScreenBufferInfo(m.out, &info) if err != nil { - return WinSize{}, errors.Wrap(err, "unable to get console info") + return WinSize{}, fmt.Errorf("unable to get console info: %w", err) } winsize := WinSize{ @@ -139,7 +139,7 @@ func (m *master) DisableEcho() error { mode |= windows.ENABLE_LINE_INPUT if err := windows.SetConsoleMode(m.in, mode); err != nil { - return errors.Wrap(err, "unable to set console to disable echo") + return fmt.Errorf("unable to set console to disable echo: %w", err) } return nil @@ -192,7 +192,7 @@ func makeInputRaw(fd windows.Handle, mode uint32) error { } if err := windows.SetConsoleMode(fd, mode); err != nil { - return errors.Wrap(err, "unable to set console to raw mode") + return fmt.Errorf("unable to set console to raw mode: %w", err) } return nil diff --git a/vendor/github.com/containerd/console/console_zos.go b/vendor/github.com/containerd/console/console_zos.go new file mode 100644 index 00000000000..b348a839a03 --- /dev/null +++ b/vendor/github.com/containerd/console/console_zos.go @@ -0,0 +1,163 @@ +// +build zos + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + var f File + var err error + var slave string + for i := 0;; i++ { + ptyp := fmt.Sprintf("/dev/ptyp%04d", i) + f, err = os.OpenFile(ptyp, os.O_RDWR, 0600) + if err == nil { + slave = fmt.Sprintf("/dev/ttyp%04d", i) + break + } + if os.IsNotExist(err) { + return nil, "", err + } + // else probably Resource Busy + } + m, err := newMaster(f) + if err != nil { + return nil, "", err + } + return m, slave, nil +} + +type master struct { + f File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return tcswinsz(m.f.Fd(), ws) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState = cfmakeraw(rawState) + rawState.Oflag = rawState.Oflag | unix.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. +func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go index 5cd4c550ce8..a6bf01e8d1a 100644 --- a/vendor/github.com/containerd/console/tc_unix.go +++ b/vendor/github.com/containerd/console/tc_unix.go @@ -1,4 +1,4 @@ -// +build darwin freebsd linux netbsd openbsd solaris +// +build darwin freebsd linux netbsd openbsd solaris zos /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go b/vendor/github.com/containerd/console/tc_zos.go similarity index 68% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go rename to vendor/github.com/containerd/console/tc_zos.go index 6e40a9c7d7f..4262eaf4cc0 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.go +++ b/vendor/github.com/containerd/console/tc_zos.go @@ -14,17 +14,13 @@ limitations under the License. */ -package sys +package console import ( - _ "unsafe" // required for go:linkname. + "golang.org/x/sys/unix" ) -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() - -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go index b7f86da8695..d3d9839d639 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/doc.go @@ -14,5 +14,5 @@ limitations under the License. */ -// Package events defines the event pushing and subscription service. +// Package events defines the ttrpc event service. package events diff --git a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/api/types/task/doc.go similarity index 91% rename from vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s rename to vendor/github.com/containerd/containerd/api/types/task/doc.go index c073fa4ad7c..e10c7a46993 100644 --- a/vendor/github.com/containerd/containerd/sys/subprocess_unsafe_linux.s +++ b/vendor/github.com/containerd/containerd/api/types/task/doc.go @@ -13,3 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ + +// Package task defines the task service. 
+package task diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 8b600673fe3..5606cc88a94 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,15 +21,14 @@ package cio import ( "context" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "sync" "syscall" "github.com/containerd/fifo" - "github.com/pkg/errors" ) // NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root @@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { return nil, err } } - dir, err := ioutil.TempDir(root, "") + dir, err := os.MkdirTemp(root, "") if err != nil { return nil, err } @@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { if fifos.Stdin != "" { if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdin fifo") + return f, fmt.Errorf("failed to open stdin fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdin != nil { @@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if fifos.Stdout != "" { if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stdout fifo") + return f, fmt.Errorf("failed to open stdout fifo: %w", retErr) } defer func() { if retErr != nil && f.Stdout != nil { @@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) { } if !fifos.Terminal && fifos.Stderr != "" { if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil { - return f, errors.Wrapf(retErr, "failed to open stderr fifo") + return f, fmt.Errorf("failed to open stderr fifo: %w", retErr) } } return f, nil diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go index ded475788f6..f3d736a6d42 100644 --- a/vendor/github.com/containerd/containerd/cio/io_windows.go +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -23,7 +23,6 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) const pipeRoot = `\\.\pipe` @@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdin != "" { l, err := winio.ListenPipe(fifos.Stdin, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin) + return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err) } cios.closers = append(cios.closers, l) @@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stdout != "" { l, err := winio.ListenPipe(fifos.Stdout, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout) + return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err) } cios.closers = append(cios.closers, l) @@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) { if fifos.Stderr != "" { l, err := winio.ListenPipe(fifos.Stderr, nil) if err != nil { - return nil, errors.Wrapf(err, "failed to 
create stderr pipe %s", fifos.Stderr) + return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err) } cios.closers = append(cios.closers, l) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go new file mode 100644 index 00000000000..1391884cde7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/defaults/defaults_darwin.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/var/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/var/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/var/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/var/run/containerd/fifo" + // DefaultRuntime would be a multiple of choices, thus empty + DefaultRuntime = "" + // DefaultConfigDir is the default location for config files. + DefaultConfigDir = "/etc/containerd" +) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 6b69cd06b9b..8e2619a381a 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index a80700075f9..9f4bed8b077 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index 05a35228ca4..87622559708 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -17,7 +17,7 @@ // Package errdefs defines the common errors used throughout containerd // packages. // -// Use with errors.Wrap and error.Wrapf to add context to an error. +// Use with fmt.Errorf to add context to an error. // // To detect an error class, use the IsXXX functions to tell whether an error // is of a certain type. 
@@ -28,8 +28,7 @@ package errdefs import ( "context" - - "github.com/pkg/errors" + "errors" ) // Definitions of common error types used throughout containerd. All containerd diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go index 209f63bd0fc..7a9b33e05af 100644 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -18,9 +18,9 @@ package errdefs import ( "context" + "fmt" "strings" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -68,9 +68,9 @@ func ToGRPC(err error) error { // ToGRPCf maps the error to grpc error codes, assembling the formatting string // and combining it with the target error string. // -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) } // FromGRPC returns the underlying error from a grpc service based on the grpc error code @@ -104,9 +104,9 @@ func FromGRPC(err error) error { msg := rebaseMessage(cls, err) if msg != "" { - err = errors.Wrap(cls, msg) + err = fmt.Errorf("%s: %w", msg, cls) } else { - err = errors.WithStack(cls) + err = cls } return err diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go new file mode 100644 index 00000000000..a1f385d7abd --- /dev/null +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -0,0 +1,251 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package exchange + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/typeurl" + goevents "github.com/docker/go-events" + "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" +) + +// Exchange broadcasts events +type Exchange struct { + broadcaster *goevents.Broadcaster +} + +// NewExchange returns a new event Exchange +func NewExchange() *Exchange { + return &Exchange{ + broadcaster: goevents.NewBroadcaster(), + } +} + +var _ events.Publisher = &Exchange{} +var _ events.Forwarder = &Exchange{} +var _ events.Subscriber = &Exchange{} + +// Forward accepts an envelope to be directly distributed on the exchange. +// +// This is useful when an event is forwarded on behalf of another namespace or +// when the event is propagated on behalf of another publisher. 
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) { + if err := validateEnvelope(envelope); err != nil { + return err + } + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error forwarding event") + } else { + logger.Debug("event forwarded") + } + }() + + return e.broadcaster.Write(envelope) +} + +// Publish packages and sends an event. The caller will be considered the +// initial publisher of the event. This means the timestamp will be calculated +// at this point and this method may read from the calling context. +func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { + var ( + namespace string + encoded *types.Any + envelope events.Envelope + ) + + namespace, err = namespaces.NamespaceRequired(ctx) + if err != nil { + return fmt.Errorf("failed publishing event: %w", err) + } + if err := validateTopic(topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", topic, err) + } + + encoded, err = typeurl.MarshalAny(event) + if err != nil { + return err + } + + envelope.Timestamp = time.Now().UTC() + envelope.Namespace = namespace + envelope.Topic = topic + envelope.Event = encoded + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error publishing event") + } else { + logger.Debug("event published") + } + }() + + return e.broadcaster.Write(&envelope) +} + +// Subscribe to events on the exchange. Events are sent through the returned +// channel ch. If an error is encountered, it will be sent on channel errs and +// errs will be closed. To end the subscription, cancel the provided context. +// +// Zero or more filters may be provided as strings. Only events that match +// *any* of the provided filters will be sent on the channel. The filters use +// the standard containerd filters package syntax. +func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) { + var ( + evch = make(chan *events.Envelope) + errq = make(chan error, 1) + channel = goevents.NewChannel(0) + queue = goevents.NewQueue(channel) + dst goevents.Sink = queue + ) + + closeAll := func() { + channel.Close() + queue.Close() + e.broadcaster.Remove(dst) + close(errq) + } + + ch = evch + errs = errq + + if len(fs) > 0 { + filter, err := filters.ParseAll(fs...) + if err != nil { + errq <- fmt.Errorf("failed parsing subscription filters: %w", err) + closeAll() + return + } + + dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool { + return filter.Match(adapt(gev)) + })) + } + + e.broadcaster.Add(dst) + + go func() { + defer closeAll() + + var err error + loop: + for { + select { + case ev := <-channel.C: + env, ok := ev.(*events.Envelope) + if !ok { + // TODO(stevvooe): For the most part, we are well protected + // from this condition. Both Forward and Publish protect + // from this. 
+ err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev) + break + } + + select { + case evch <- env: + case <-ctx.Done(): + break loop + } + case <-ctx.Done(): + break loop + } + } + + if err == nil { + if cerr := ctx.Err(); cerr != context.Canceled { + err = cerr + } + } + + errq <- err + }() + + return +} + +func validateTopic(topic string) error { + if topic == "" { + return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument) + } + + if topic[0] != '/' { + return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument) + } + + if len(topic) == 1 { + return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument) + } + + components := strings.Split(topic[1:], "/") + for _, component := range components { + if err := identifiers.Validate(component); err != nil { + return fmt.Errorf("failed validation on component %q: %w", component, err) + } + } + + return nil +} + +func validateEnvelope(envelope *events.Envelope) error { + if err := identifiers.Validate(envelope.Namespace); err != nil { + return fmt.Errorf("event envelope has invalid namespace: %w", err) + } + + if err := validateTopic(envelope.Topic); err != nil { + return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err) + } + + if envelope.Timestamp.IsZero() { + return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument) + } + + return nil +} + +func adapt(ev interface{}) filters.Adaptor { + if adaptor, ok := ev.(filters.Adaptor); ok { + return adaptor + } + + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + return "", false + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 00000000000..5a9c559c1e0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 00000000000..cf09d8d9e4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package filters defines a syntax and parser that can be used for the +// filtration of items across the containerd API. The core is built on the +// concept of protobuf field paths, with quoting. Several operators allow the +// user to flexibly select items based on field presence, equality, inequality +// and regular expressions. Flexible adaptors support working with any type. +// +// The syntax is fairly familiar, if you've used container ecosystem +// projects. At the core, we base it on the concept of protobuf field +// paths, augmenting with the ability to quote portions of the field path +// to match arbitrary labels. These "selectors" come in the following +// syntax: +// +// ``` +// [] +// ``` +// +// A basic example is as follows: +// +// ``` +// name==foo +// ``` +// +// This would match all objects that have a field `name` with the value +// `foo`. If we only want to test if the field is present, we can omit the +// operator. This is most useful for matching labels in containerd. The +// following will match objects that have the field "labels" and have the +// label "foo" defined: +// +// ``` +// labels.foo +// ``` +// +// We also allow for quoting of parts of the field path to allow matching +// of arbitrary items: +// +// ``` +// labels."very complex label"==something +// ``` +// +// We also define `!=` and `~=` as operators. The `!=` will match all +// objects that don't match the value for a field and `~=` will compile the +// target value as a regular expression and match the field value against that. +// +// Selectors can be combined using a comma, such that the resulting +// selector will require all selectors are matched for the object to match. 
+// The following example will match objects that are named `foo` and have +// the label `bar`: +// +// ``` +// name==foo,labels.bar +// ``` +// +package filters + +import ( + "regexp" + + "github.com/containerd/containerd/log" +) + +// Filter matches specific resources based the provided filter +type Filter interface { + Match(adaptor Adaptor) bool +} + +// FilterFunc is a function that handles matching with an adaptor +type FilterFunc func(Adaptor) bool + +// Match matches the FilterFunc returning true if the object matches the filter +func (fn FilterFunc) Match(adaptor Adaptor) bool { + return fn(adaptor) +} + +// Always is a filter that always returns true for any type of object +var Always FilterFunc = func(adaptor Adaptor) bool { + return true +} + +// Any allows multiple filters to be matched against the object +type Any []Filter + +// Match returns true if any of the provided filters are true +func (m Any) Match(adaptor Adaptor) bool { + for _, m := range m { + if m.Match(adaptor) { + return true + } + } + + return false +} + +// All allows multiple filters to be matched against the object +type All []Filter + +// Match only returns true if all filters match the object +func (m All) Match(adaptor Adaptor) bool { + for _, m := range m { + if !m.Match(adaptor) { + return false + } + } + + return true +} + +type operator int + +const ( + operatorPresent = iota + operatorEqual + operatorNotEqual + operatorMatches +) + +func (op operator) String() string { + switch op { + case operatorPresent: + return "?" + case operatorEqual: + return "==" + case operatorNotEqual: + return "!=" + case operatorMatches: + return "~=" + } + + return "unknown" +} + +type selector struct { + fieldpath []string + operator operator + value string + re *regexp.Regexp +} + +func (m selector) Match(adaptor Adaptor) bool { + value, present := adaptor.Field(m.fieldpath) + + switch m.operator { + case operatorPresent: + return present + case operatorEqual: + return present && value == m.value + case operatorNotEqual: + return value != m.value + case operatorMatches: + if m.re == nil { + r, err := regexp.Compile(m.value) + if err != nil { + log.L.Errorf("error compiling regexp %q", m.value) + return false + } + + m.re = r + } + + return m.re.MatchString(value) + default: + return false + } +} diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go new file mode 100644 index 00000000000..49182d7b7bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -0,0 +1,291 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "fmt" + "io" + + "github.com/containerd/containerd/errdefs" +) + +/* +Parse the strings into a filter that may be used with an adaptor. + +The filter is made up of zero or more selectors. + +The format is a comma separated list of expressions, in the form of +``, known as selectors. 
All selectors must match the +target object for the filter to be true. + +We define the operators "==" for equality, "!=" for not equal and "~=" for a +regular expression. If the operator and value are not present, the matcher will +test for the presence of a value, as defined by the target object. + +The formal grammar is as follows: + +selectors := selector ("," selector)* +selector := fieldpath (operator value) +fieldpath := field ('.' field)* +field := quoted | [A-Za-z] [A-Za-z0-9_]+ +operator := "==" | "!=" | "~=" +value := quoted | [^\s,]+ +quoted := + +*/ +func Parse(s string) (Filter, error) { + // special case empty to match all + if s == "" { + return Always, nil + } + + p := parser{input: s} + return p.parse() +} + +// ParseAll parses each filter in ss and returns a filter that will return true +// if any filter matches the expression. +// +// If no filters are provided, the filter will match anything. +func ParseAll(ss ...string) (Filter, error) { + if len(ss) == 0 { + return Always, nil + } + + var fs []Filter + for _, s := range ss { + f, err := Parse(s) + if err != nil { + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) + } + + fs = append(fs, f) + } + + return Any(fs), nil +} + +type parser struct { + input string + scanner scanner +} + +func (p *parser) parse() (Filter, error) { + p.scanner.init(p.input) + + ss, err := p.selectors() + if err != nil { + return nil, fmt.Errorf("filters: %w", err) + } + + return ss, nil +} + +func (p *parser) selectors() (Filter, error) { + s, err := p.selector() + if err != nil { + return nil, err + } + + ss := All{s} + +loop: + for { + tok := p.scanner.peek() + switch tok { + case ',': + pos, tok, _ := p.scanner.scan() + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a separator") + } + + s, err := p.selector() + if err != nil { + return nil, err + } + + ss = append(ss, s) + case tokenEOF: + break loop + default: + return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) + } + } + + return ss, nil +} + +func (p *parser) selector() (selector, error) { + fieldpath, err := p.fieldpath() + if err != nil { + return selector{}, err + } + + switch p.scanner.peek() { + case ',', tokenSeparator, tokenEOF: + return selector{ + fieldpath: fieldpath, + operator: operatorPresent, + }, nil + } + + op, err := p.operator() + if err != nil { + return selector{}, err + } + + var allowAltQuotes bool + if op == operatorMatches { + allowAltQuotes = true + } + + value, err := p.value(allowAltQuotes) + if err != nil { + if err == io.EOF { + return selector{}, io.ErrUnexpectedEOF + } + return selector{}, err + } + + return selector{ + fieldpath: fieldpath, + value: value, + operator: op, + }, nil +} + +func (p *parser) fieldpath() ([]string, error) { + f, err := p.field() + if err != nil { + return nil, err + } + + fs := []string{f} +loop: + for { + tok := p.scanner.peek() // lookahead to consume field separator + + switch tok { + case '.': + pos, tok, _ := p.scanner.scan() // consume separator + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a field separator (`.`)") + } + + f, err := p.field() + if err != nil { + return nil, err + } + + fs = append(fs, f) + default: + // let the layer above handle the other bad cases. 
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 00000000000..b76aab9b4a7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. 
+// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 00000000000..6a485467b8a --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go index f52317b491b..cbd3a52ba9e 100644 --- a/vendor/github.com/containerd/containerd/identifiers/validate.go +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -25,10 +25,10 @@ package identifiers import ( + "fmt" "regexp" "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" ) const ( @@ -51,15 +51,15 @@ var ( // In general identifiers that pass this validation should be safe for use as filesystem path components. func Validate(s string) error { if len(s) == 0 { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty") + return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument) } if len(s) > maxLength { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength) + return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument) } if !identifierRe.MatchString(s) { - return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe) + return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument) } return nil } diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 37b6a7d1c8a..0db9562b82b 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -52,7 +52,8 @@ const ( // WithLogger returns a new context with the provided logger. Use in // combination with logger.WithField(s) for great effect. func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) + e := logger.WithContext(ctx) + return context.WithValue(ctx, loggerKey{}, e) } // GetLogger retrieves the current logger from the context. 
If no logger is @@ -61,7 +62,7 @@ func GetLogger(ctx context.Context) *logrus.Entry { logger := ctx.Value(loggerKey{}) if logger == nil { - return L + return L.WithContext(ctx) } return logger.(*logrus.Entry) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index b53c9012c1b..e5e23fe4301 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -18,11 +18,11 @@ package namespaces import ( "context" + "fmt" "os" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/identifiers" - "github.com/pkg/errors" ) const ( @@ -69,10 +69,10 @@ func Namespace(ctx context.Context) (string, bool) { func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { - return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + return "", fmt.Errorf("namespace is required: %w", errdefs.ErrFailedPrecondition) } if err := identifiers.Validate(namespace); err != nil { - return "", errors.Wrap(err, "namespace validation") + return "", fmt.Errorf("namespace validation: %w", err) } return namespace, nil } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go index b382215025d..176a0e6673a 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/constants/constants.go @@ -16,11 +16,11 @@ package constants -// TODO(random-liu): Merge annotations package into this package. - const ( // K8sContainerdNamespace is the namespace we use to connect containerd. K8sContainerdNamespace = "k8s.io" - // CRIVersion is the CRI version supported by the CRI plugin. - CRIVersion = "v1alpha2" + // CRIVersion is the latest CRI version supported by the CRI plugin. + CRIVersion = "v1" + // CRIVersionAlpha is the alpha version of CRI supported by the CRI plugin. + CRIVersionAlpha = "v1alpha2" ) diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go index 59d41411f23..2cdc97e1de5 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers.go @@ -25,7 +25,7 @@ import ( "github.com/containerd/containerd/cio" "golang.org/x/net/context" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" ) // AttachOptions specifies how to attach to a container. diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go index 2780b958ab5..f0f90840160 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go index dcc9fe6eb94..b6c9c12028f 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/helpers_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
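// Sketch of the build-constraint convention applied throughout this vendor bump
// (not itself part of the diff): files without an OS suffix carry both the new
// //go:build line and the legacy // +build line so Go 1.16 and 1.17+ agree on
// the constraint, while *_windows.go files drop the tags entirely because the
// filename suffix already constrains them. The file contents below are a
// hypothetical example, not a vendored file.

//go:build !windows && !darwin
// +build !windows,!darwin

package defaults

// socketAddress only compiles on non-Windows, non-Darwin platforms.
func socketAddress() string {
	return "/var/run/containerd/containerd.sock"
}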
@@ -19,13 +17,13 @@ package io import ( + "fmt" "io" "net" "os" "sync" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -62,7 +60,7 @@ func openPipe(ctx context.Context, fn string, flag int, perm os.FileMode) (io.Re func (p *pipe) Write(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Write(b) } @@ -70,7 +68,7 @@ func (p *pipe) Write(b []byte) (int, error) { func (p *pipe) Read(b []byte) (int, error) { p.conWg.Wait() if p.conErr != nil { - return 0, errors.Wrap(p.conErr, "connection error") + return 0, fmt.Errorf("connection error: %w", p.conErr) } return p.con.Read(b) } diff --git a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go index 27721e71724..674a89fdff8 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/io/logger.go @@ -21,11 +21,10 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "time" "github.com/sirupsen/logrus" - runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtime "k8s.io/cri-api/pkg/apis/runtime/v1" cioutil "github.com/containerd/containerd/pkg/ioutil" ) @@ -43,7 +42,7 @@ const ( // NewDiscardLogger creates logger which discards all the input. func NewDiscardLogger() io.WriteCloser { - return cioutil.NewNopWriteCloser(ioutil.Discard) + return cioutil.NewNopWriteCloser(io.Discard) } // NewCRILogger returns a write closer which redirect container log into diff --git a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go index d0e0bf37efa..cf027eaf8c0 100644 --- a/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go +++ b/vendor/github.com/containerd/containerd/pkg/cri/util/deep_copy.go @@ -18,8 +18,8 @@ package util import ( "encoding/json" - - "github.com/pkg/errors" + "errors" + "fmt" ) // DeepCopy makes a deep copy from src into dst. 
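// Minimal sketch (not from the vendored sources) of the JSON round-trip
// technique used by DeepCopy in the hunks around this point: marshal the
// source, unmarshal into the destination pointer, and wrap failures with %w.
// The containerConfig type is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

type containerConfig struct {
	Image string            `json:"image"`
	Env   map[string]string `json:"env"`
}

func deepCopy(dst, src interface{}) error {
	b, err := json.Marshal(src)
	if err != nil {
		return fmt.Errorf("unable to marshal src: %w", err)
	}
	if err := json.Unmarshal(b, dst); err != nil {
		return fmt.Errorf("unable to unmarshal into dst: %w", err)
	}
	return nil
}

func main() {
	src := containerConfig{Image: "busybox", Env: map[string]string{"A": "1"}}
	var dst containerConfig
	if err := deepCopy(&dst, src); err != nil {
		panic(err)
	}
	dst.Env["A"] = "2"                      // the copy owns its own map
	fmt.Println(src.Env["A"], dst.Env["A"]) // 1 2
}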
@@ -32,11 +32,11 @@ func DeepCopy(dst interface{}, src interface{}) error { } bytes, err := json.Marshal(src) if err != nil { - return errors.Wrap(err, "unable to marshal src") + return fmt.Errorf("unable to marshal src: %w", err) } err = json.Unmarshal(bytes, dst) if err != nil { - return errors.Wrap(err, "unable to unmarshal into dst") + return fmt.Errorf("unable to unmarshal into dst: %w", err) } return nil } diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go index aa604baab92..74c303b944f 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer.go @@ -18,10 +18,9 @@ package dialer import ( "context" + "fmt" "net" "time" - - "github.com/pkg/errors" ) type dialResult struct { @@ -74,6 +73,6 @@ func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) { dr.c.Close() } }() - return nil, errors.Errorf("dial %s: timeout", address) + return nil, fmt.Errorf("dial %s: timeout", address) } } diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go index e7d19583395..b4304ffbf1f 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go new file mode 100644 index 00000000000..bc1af75abb7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package shutdown + +import ( + "context" + "errors" + "sync" + "time" + + "golang.org/x/sync/errgroup" +) + +// ErrShutdown is the error condition when a context has been fully shutdown +var ErrShutdown = errors.New("shutdown") + +// Service is used to facilitate shutdown by through callback +// registration and shutdown initiation +type Service interface { + // Shutdown initiates shutdown + Shutdown() + // RegisterCallback registers functions to be called on shutdown and before + // the shutdown channel is closed. A callback error will propagate to the + // context error + RegisterCallback(func(context.Context) error) +} + +// WithShutdown returns a context which is similar to a cancel context, but +// with callbacks which can propagate to the context error. Unlike a cancel +// context, the shutdown context cannot be canceled from the parent context. +// However, future child contexes will be canceled upon shutdown. 
+func WithShutdown(ctx context.Context) (context.Context, Service) { + ss := &shutdownService{ + Context: ctx, + doneC: make(chan struct{}), + timeout: 30 * time.Second, + } + return ss, ss +} + +type shutdownService struct { + context.Context + + mu sync.Mutex + isShutdown bool + callbacks []func(context.Context) error + doneC chan struct{} + err error + timeout time.Duration +} + +func (s *shutdownService) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + if s.isShutdown { + return + } + s.isShutdown = true + + go func(callbacks []func(context.Context) error) { + ctx, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + grp, ctx := errgroup.WithContext(ctx) + for i := range callbacks { + fn := callbacks[i] + grp.Go(func() error { return fn(ctx) }) + } + err := grp.Wait() + if err == nil { + err = ErrShutdown + } + s.mu.Lock() + s.err = err + close(s.doneC) + s.mu.Unlock() + }(s.callbacks) +} + +func (s *shutdownService) Done() <-chan struct{} { + return s.doneC +} + +func (s *shutdownService) Err() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.err +} +func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.callbacks == nil { + s.callbacks = []func(context.Context) error{} + } + s.callbacks = append(s.callbacks, fn) +} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go index 8b4d925d28e..f05ab7aa9f9 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -17,13 +17,14 @@ package ttrpcutil import ( + "errors" + "fmt" "sync" "time" v1 "github.com/containerd/containerd/api/services/ttrpc/events/v1" "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/ttrpc" - "github.com/pkg/errors" ) const ttrpcDialTimeout = 5 * time.Second @@ -43,7 +44,7 @@ func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { conn, err := dialer.Dialer(address, ttrpcDialTimeout) if err != nil { - return nil, errors.Wrap(err, "failed to connect") + return nil, fmt.Errorf("failed to connect: %w", err) } client := ttrpc.NewClient(conn, opts...) 
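// Minimal usage sketch (not from the vendored sources) for the new pkg/shutdown
// helper added above: callbacks registered on the Service run when Shutdown is
// called, and the returned context reports ErrShutdown (or a callback error)
// once they finish. The cleanup hook below is hypothetical.
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/pkg/shutdown"
)

func main() {
	ctx, svc := shutdown.WithShutdown(context.Background())

	// Any error returned here propagates to ctx.Err() instead of ErrShutdown.
	svc.RegisterCallback(func(context.Context) error {
		fmt.Println("flushing state")
		return nil
	})

	svc.Shutdown()
	<-ctx.Done()
	fmt.Println(ctx.Err()) // shutdown
}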
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go index aab756fd2a6..4f8d7dd2d59 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go index c7657e1869b..3913ef66373 100644 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -38,12 +38,22 @@ func platformVector(platform specs.Platform) []specs.Platform { switch platform.Architecture { case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } vector = append(vector, specs.Platform{ Architecture: "386", OS: platform.OS, OSVersion: platform.OSVersion, OSFeatures: platform.OSFeatures, - Variant: platform.Variant, }) case "arm": if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 4a7177e3138..046e0356d19 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -18,6 +18,7 @@ package platforms import ( "bufio" + "fmt" "os" "runtime" "strings" @@ -25,7 +26,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/pkg/errors" ) // Present the ARM instruction set architecture, eg: v7, v8 @@ -48,7 +48,7 @@ func cpuVariant() string { // by ourselves. We can just parse these information from /proc/cpuinfo func getCPUInfo(pattern string) (info string, err error) { if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) } cpuinfo, err := os.Open("/proc/cpuinfo") @@ -75,7 +75,7 @@ func getCPUInfo(pattern string) (info string, err error) { return "", err } - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) + return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) } func getCPUVariant() string { diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 6ede94061eb..dbe9957ca9d 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. 
func isKnownOS(os string) bool { switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false @@ -86,9 +86,11 @@ func normalizeArch(arch, variant string) (string, string) { case "i386": arch = "386" variant = "" - case "x86_64", "x86-64": + case "x86_64", "x86-64", "amd64": arch = "amd64" - variant = "" + if variant == "v1" { + variant = "" + } case "aarch64", "arm64": arch = "arm64" switch variant { diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go index cb77fbc9f7b..cfa3ff34a19 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -16,27 +16,11 @@ package platforms -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - // DefaultString returns the default string specifier for the platform. func DefaultString() string { return Format(DefaultSpec()) } -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - // DefaultStrict returns strict form of Default. func DefaultStrict() MatchComparer { return OnlyStrict(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go new file mode 100644 index 00000000000..e249fe48d38 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -0,0 +1,45 @@ +//go:build darwin +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. 
+func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go index e8a7d5ffa0d..49690f1b3e7 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !darwin +// +build !windows,!darwin /* Copyright The containerd Authors. @@ -18,6 +19,22 @@ package platforms +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + // Default returns the default matcher for the platform. func Default() MatchComparer { return Only(DefaultSpec()) diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index 0c380e3b7c1..c1aaf72ca8e 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -29,6 +27,18 @@ import ( "golang.org/x/sys/windows" ) +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. 
+ Variant: cpuVariant(), + } +} + type matchComparer struct { defaults Matcher osVersionPrefix string diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 088bdea0508..8f955d036df 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -107,6 +107,8 @@ package platforms import ( + "fmt" + "path" "regexp" "runtime" "strconv" @@ -114,7 +116,6 @@ import ( "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" ) var ( @@ -166,14 +167,14 @@ func (m *matcher) String() string { func Parse(specifier string) (specs.Platform, error) { if strings.Contains(specifier, "*") { // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) } parts := strings.Split(specifier, "/") for _, part := range parts { if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) } } @@ -205,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) case 2: // In this case, we treat as a regular os/arch pair. We don't care // about whether or not we know of the platform. @@ -227,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) { return p, nil } - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) } // MustParse is like Parses but panics if the specifier cannot be parsed. @@ -246,20 +247,7 @@ func Format(platform specs.Platform) string { return "unknown" } - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") + return path.Join(platform.OS, platform.Architecture, platform.Variant) } // Normalize validates and translate the platform to the canonical value. 
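// Minimal usage sketch (not from the vendored sources) for the platforms
// helpers touched above: Parse validates and normalizes a specifier, Format now
// joins the fields with path.Join, and invalid specifiers wrap
// errdefs.ErrInvalidArgument so errors.Is can detect them.
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/platforms"
)

func main() {
	p, err := platforms.Parse("linux/armhf") // known alias, normalized to arm/v7
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // linux/arm/v7

	if _, err := platforms.Parse("linux/"); err != nil {
		fmt.Println(errors.Is(err, errdefs.ErrInvalidArgument)) // true, thanks to %w
	}
}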
@@ -269,10 +257,5 @@ func joinNotEmpty(s ...string) string { func Normalize(platform specs.Platform) specs.Platform { platform.OS = normalizeOS(platform.OS) platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - // these fields are deprecated, remove them - platform.OSFeatures = nil - platform.OSVersion = "" - return platform } diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go new file mode 100644 index 00000000000..dcb533c8a74 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/context.go @@ -0,0 +1,171 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events/exchange" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// InitContext is used for plugin initialization +type InitContext struct { + Context context.Context + Root string + State string + Config interface{} + Address string + TTRPCAddress string + + // deprecated: will be removed in 2.0, use plugin.EventType + Events *exchange.Exchange + + Meta *Meta // plugins can fill in metadata at init. + + plugins *Set +} + +// NewContext returns a new plugin InitContext +func NewContext(ctx context.Context, r *Registration, plugins *Set, root, state string) *InitContext { + return &InitContext{ + Context: ctx, + Root: filepath.Join(root, r.URI()), + State: filepath.Join(state, r.URI()), + Meta: &Meta{ + Exports: map[string]string{}, + }, + plugins: plugins, + } +} + +// Get returns the first plugin by its type +func (i *InitContext) Get(t Type) (interface{}, error) { + return i.plugins.Get(t) +} + +// Meta contains information gathered from the registration and initialization +// process. +type Meta struct { + Platforms []ocispec.Platform // platforms supported by plugin + Exports map[string]string // values exported by plugin + Capabilities []string // feature switches for plugin +} + +// Plugin represents an initialized plugin, used with an init context. +type Plugin struct { + Registration *Registration // registration, as initialized + Config interface{} // config, as initialized + Meta *Meta + + instance interface{} + err error // will be set if there was an error initializing the plugin +} + +// Err returns the errors during initialization. +// returns nil if not error was encountered +func (p *Plugin) Err() error { + return p.err +} + +// Instance returns the instance and any initialization error of the plugin +func (p *Plugin) Instance() (interface{}, error) { + return p.instance, p.err +} + +// Set defines a plugin collection, used with InitContext. +// +// This maintains ordering and unique indexing over the set. +// +// After iteratively instantiating plugins, this set should represent, the +// ordered, initialization set of plugins for a containerd instance. 
+type Set struct { + ordered []*Plugin // order of initialization + byTypeAndID map[Type]map[string]*Plugin +} + +// NewPluginSet returns an initialized plugin set +func NewPluginSet() *Set { + return &Set{ + byTypeAndID: make(map[Type]map[string]*Plugin), + } +} + +// Add a plugin to the set +func (ps *Set) Add(p *Plugin) error { + if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok { + ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{ + p.Registration.ID: p, + } + } else if _, idok := byID[p.Registration.ID]; !idok { + byID[p.Registration.ID] = p + } else { + return fmt.Errorf("plugin %v already initialized: %w", p.Registration.URI(), errdefs.ErrAlreadyExists) + } + + ps.ordered = append(ps.ordered, p) + return nil +} + +// Get returns the first plugin by its type +func (ps *Set) Get(t Type) (interface{}, error) { + for _, v := range ps.byTypeAndID[t] { + return v.Instance() + } + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) +} + +// GetAll returns all initialized plugins +func (ps *Set) GetAll() []*Plugin { + return ps.ordered +} + +// Plugins returns plugin set +func (i *InitContext) Plugins() *Set { + return i.plugins +} + +// GetAll plugins in the set +func (i *InitContext) GetAll() []*Plugin { + return i.plugins.GetAll() +} + +// GetByID returns the plugin of the given type and ID +func (i *InitContext) GetByID(t Type, id string) (interface{}, error) { + ps, err := i.GetByType(t) + if err != nil { + return nil, err + } + p, ok := ps[id] + if !ok { + return nil, fmt.Errorf("no %s plugins with id %s: %w", t, id, errdefs.ErrNotFound) + } + return p.Instance() +} + +// GetByType returns all plugins with the specific type. +func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) { + p, ok := i.plugins.byTypeAndID[t] + if !ok { + return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound) + } + + return p, nil +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go new file mode 100644 index 00000000000..eb38c127157 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -0,0 +1,223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "errors" + "fmt" + "sync" +) + +var ( + // ErrNoType is returned when no type is specified + ErrNoType = errors.New("plugin: no type") + // ErrNoPluginID is returned when no id is specified + ErrNoPluginID = errors.New("plugin: no id") + // ErrIDRegistered is returned when a duplicate id is already registered + ErrIDRegistered = errors.New("plugin: id already registered") + // ErrSkipPlugin is used when a plugin is not initialized and should not be loaded, + // this allows the plugin loader differentiate between a plugin which is configured + // not to load and one that fails to load. 
+ ErrSkipPlugin = errors.New("skip plugin") + + // ErrInvalidRequires will be thrown if the requirements for a plugin are + // defined in an invalid manner. + ErrInvalidRequires = errors.New("invalid requires") +) + +// IsSkipPlugin returns true if the error is skipping the plugin +func IsSkipPlugin(err error) bool { + return errors.Is(err, ErrSkipPlugin) +} + +// Type is the type of the plugin +type Type string + +func (t Type) String() string { return string(t) } + +const ( + // InternalPlugin implements an internal plugin to containerd + InternalPlugin Type = "io.containerd.internal.v1" + // RuntimePlugin implements a runtime + RuntimePlugin Type = "io.containerd.runtime.v1" + // RuntimePluginV2 implements a runtime v2 + RuntimePluginV2 Type = "io.containerd.runtime.v2" + // ServicePlugin implements a internal service + ServicePlugin Type = "io.containerd.service.v1" + // GRPCPlugin implements a grpc service + GRPCPlugin Type = "io.containerd.grpc.v1" + // TTRPCPlugin implements a ttrpc shim service + TTRPCPlugin Type = "io.containerd.ttrpc.v1" + // SnapshotPlugin implements a snapshotter + SnapshotPlugin Type = "io.containerd.snapshotter.v1" + // TaskMonitorPlugin implements a task monitor + TaskMonitorPlugin Type = "io.containerd.monitor.v1" + // DiffPlugin implements a differ + DiffPlugin Type = "io.containerd.differ.v1" + // MetadataPlugin implements a metadata store + MetadataPlugin Type = "io.containerd.metadata.v1" + // ContentPlugin implements a content store + ContentPlugin Type = "io.containerd.content.v1" + // GCPlugin implements garbage collection policy + GCPlugin Type = "io.containerd.gc.v1" + // EventPlugin implements event handling + EventPlugin Type = "io.containerd.event.v1" + // TracingProcessorPlugin implements a open telemetry span processor + TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1" +) + +const ( + // RuntimeLinuxV1 is the legacy linux runtime + RuntimeLinuxV1 = "io.containerd.runtime.v1.linux" + // RuntimeRuncV1 is the runc runtime that supports a single container + RuntimeRuncV1 = "io.containerd.runc.v1" + // RuntimeRuncV2 is the runc runtime that supports multiple containers per shim + RuntimeRuncV2 = "io.containerd.runc.v2" +) + +// Registration contains information for registering a plugin +type Registration struct { + // Type of the plugin + Type Type + // ID of the plugin + ID string + // Config specific to the plugin + Config interface{} + // Requires is a list of plugins that the registered plugin requires to be available + Requires []Type + + // InitFn is called when initializing a plugin. The registration and + // context are passed in. The init function may modify the registration to + // add exports, capabilities and platform support declarations. 
+ InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool +} + +// Init the registered plugin +func (r *Registration) Init(ic *InitContext) *Plugin { + p, err := r.InitFn(ic) + return &Plugin{ + Registration: r, + Config: ic.Config, + Meta: ic.Meta, + instance: p, + err: err, + } +} + +// URI returns the full plugin URI +func (r *Registration) URI() string { + return fmt.Sprintf("%s.%s", r.Type, r.ID) +} + +var register = struct { + sync.RWMutex + r []*Registration +}{} + +// Load loads all plugins at the provided path into containerd +func Load(path string) (err error) { + defer func() { + if v := recover(); v != nil { + rerr, ok := v.(error) + if !ok { + rerr = fmt.Errorf("%s", v) + } + err = rerr + } + }() + return loadPlugins(path) +} + +// Register allows plugins to register +func Register(r *Registration) { + register.Lock() + defer register.Unlock() + + if r.Type == "" { + panic(ErrNoType) + } + if r.ID == "" { + panic(ErrNoPluginID) + } + if err := checkUnique(r); err != nil { + panic(err) + } + + for _, requires := range r.Requires { + if requires == "*" && len(r.Requires) != 1 { + panic(ErrInvalidRequires) + } + } + + register.r = append(register.r, r) +} + +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return fmt.Errorf("%s: %w", r.URI(), ErrIDRegistered) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + +// Graph returns an ordered list of registered plugins for initialization. +// Plugins in disableList specified by id will be disabled. +func Graph(filter DisableFilter) (ordered []*Registration) { + register.RLock() + defer register.RUnlock() + + for _, r := range register.r { + if filter(r) { + r.Disable = true + } + } + + added := map[*Registration]bool{} + for _, r := range register.r { + if r.Disable { + continue + } + children(r, added, &ordered) + if !added[r] { + ordered = append(ordered, r) + added[r] = true + } + } + return ordered +} + +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { + for _, r := range register.r { + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) + if !added[r] { + *ordered = append(*ordered, r) + added[r] = true + } + } + } + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go new file mode 100644 index 00000000000..0df0669d29a --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -0,0 +1,63 @@ +//go:build go1.8 && !windows && amd64 && !static_build && !gccgo +// +build go1.8,!windows,amd64,!static_build,!gccgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package plugin + +import ( + "fmt" + "path/filepath" + "plugin" + "runtime" +) + +// loadPlugins loads all plugins for the OS and Arch +// that containerd is built for inside the provided path +func loadPlugins(path string) error { + abs, err := filepath.Abs(path) + if err != nil { + return err + } + pattern := filepath.Join(abs, fmt.Sprintf( + "*-%s-%s.%s", + runtime.GOOS, + runtime.GOARCH, + getLibExt(), + )) + libs, err := filepath.Glob(pattern) + if err != nil { + return err + } + for _, lib := range libs { + if _, err := plugin.Open(lib); err != nil { + return err + } + } + return nil +} + +// getLibExt returns a platform specific lib extension for +// the platform that containerd is running on +func getLibExt() string { + switch runtime.GOOS { + case "windows": + return "dll" + default: + return "so" + } +} diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go similarity index 51% rename from vendor/github.com/containerd/containerd/sys/stat_unix.go rename to vendor/github.com/containerd/containerd/plugin/plugin_other.go index 21a666dff86..a2883bbbadf 100644 --- a/vendor/github.com/containerd/containerd/sys/stat_unix.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,4 +1,5 @@ -// +build linux solaris +//go:build !go1.8 || windows || !amd64 || static_build || gccgo +// +build !go1.8 windows !amd64 static_build gccgo /* Copyright The containerd Authors. @@ -16,29 +17,9 @@ limitations under the License. */ -package sys +package plugin -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert +func loadPlugins(path string) error { + // plugins not supported until 1.8 + return nil } diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 0998639b030..6fa97dfdca9 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -338,11 +338,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { // TrimNamed removes any tag or digest from the named reference. 
func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, + repo := repository{} + if r, ok := ref.(namedRepository); ok { + repo.domain, repo.path = r.Domain(), r.Path() + } else { + repo.domain, repo.path = splitDomain(ref.Name()) } + return repo } func getBestReferenceType(ref reference) Reference { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index c14aacca99b..9d3a904237e 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -18,6 +18,7 @@ package shim import ( "context" + "errors" "flag" "fmt" "io" @@ -30,21 +31,15 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/shutdown" + "github.com/containerd/containerd/plugin" shimapi "github.com/containerd/containerd/runtime/v2/task" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Client for a shim server -type Client struct { - service shimapi.TaskService - context context.Context - signals chan os.Signal -} - // Publisher for events type Publisher interface { events.Publisher @@ -53,22 +48,37 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { - ID string + ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry ContainerdBinary string Address string TTRPCAddress string } +type StopStatus struct { + Pid int + ExitStatus int + ExitedAt time.Time +} + // Init func for the creation of a shim server +// TODO(2.0): Remove init function type Init func(context.Context, string, Publisher, func()) (Shim, error) // Shim server interface +// TODO(2.0): Remove unified shim interface type Shim interface { shimapi.TaskService Cleanup(ctx context.Context) (*shimapi.DeleteResponse, error) StartShim(ctx context.Context, opts StartOpts) (string, error) } +// Manager is the interface which manages the shim process +type Manager interface { + Name() string + Start(ctx context.Context, id string, opts StartOpts) (string, error) + Stop(ctx context.Context, id string) (StopStatus, error) +} + // OptsKey is the context key for the Opts value. 
type OptsKey struct{} @@ -91,10 +101,23 @@ type Config struct { NoSetupLogger bool } +type ttrpcService interface { + RegisterTTRPC(*ttrpc.Server) error +} + +type taskService struct { + shimapi.TaskService +} + +func (t taskService) RegisterTTRPC(server *ttrpc.Server) error { + shimapi.RegisterTaskService(server, t.TaskService) + return nil +} + var ( debugFlag bool versionFlag bool - idFlag string + id string namespaceFlag string socketFlag string bundlePath string @@ -111,7 +134,7 @@ func parseFlags() { flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") flag.BoolVar(&versionFlag, "v", false, "show the shim version and exit") flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") - flag.StringVar(&idFlag, "id", "", "id of the task") + flag.StringVar(&id, "id", "", "id of the task") flag.StringVar(&socketFlag, "socket", "", "socket path to serve") flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") @@ -136,35 +159,85 @@ func setRuntime() { } } -func setLogger(ctx context.Context, id string) error { - logrus.SetFormatter(&logrus.TextFormatter{ +func setLogger(ctx context.Context, id string) (context.Context, error) { + l := log.G(ctx) + l.Logger.SetFormatter(&logrus.TextFormatter{ TimestampFormat: log.RFC3339NanoFixed, FullTimestamp: true, }) if debugFlag { - logrus.SetLevel(logrus.DebugLevel) + l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { - return err + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + return ctx, err } - logrus.SetOutput(f) - return nil + l.Logger.SetOutput(f) + return log.WithLogger(ctx, l), nil } // Run initializes and runs a shim server -func Run(id string, initFunc Init, opts ...BinaryOpts) { +// TODO(2.0): Remove function +func Run(name string, initFunc Init, opts ...BinaryOpts) { + var config Config + for _, o := range opts { + o(&config) + } + + ctx := context.Background() + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", name)) + + if err := run(ctx, nil, initFunc, name, config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", name, err) + os.Exit(1) + } +} + +// TODO(2.0): Remove this type +type shimToManager struct { + shim Shim + name string +} + +func (stm shimToManager) Name() string { + return stm.name +} + +func (stm shimToManager) Start(ctx context.Context, id string, opts StartOpts) (string, error) { + opts.ID = id + return stm.shim.StartShim(ctx, opts) +} + +func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error) { + // shim must already have id + dr, err := stm.shim.Cleanup(ctx) + if err != nil { + return StopStatus{}, err + } + return StopStatus{ + Pid: int(dr.Pid), + ExitStatus: int(dr.ExitStatus), + ExitedAt: dr.ExitedAt, + }, nil +} + +// RunManager initialzes and runs a shim server +// TODO(2.0): Rename to Run +func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { var config Config for _, o := range opts { o(&config) } - if err := run(id, initFunc, config); err != nil { - fmt.Fprintf(os.Stderr, "%s: %s\n", id, err) + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", manager.Name())) + + if err := run(ctx, manager, nil, "", config); err != nil { + fmt.Fprintf(os.Stderr, "%s: %s", manager.Name(), err) os.Exit(1) } } -func run(id string, initFunc Init, config Config) error { +func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { fmt.Printf("%s:\n", 
os.Args[0]) @@ -182,12 +255,12 @@ func run(id string, initFunc Init, config Config) error { setRuntime() signals, err := setupSignals(config) - if err != nil { + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { + if err := subreaper(); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } } @@ -199,27 +272,49 @@ func run(id string, initFunc Init, config Config) error { } defer publisher.Close() - ctx := namespaces.WithNamespace(context.Background(), namespaceFlag) + ctx = namespaces.WithNamespace(ctx, namespaceFlag) ctx = context.WithValue(ctx, OptsKey{}, Opts{BundlePath: bundlePath, Debug: debugFlag}) - ctx = log.WithLogger(ctx, log.G(ctx).WithField("runtime", id)) - ctx, cancel := context.WithCancel(ctx) - service, err := initFunc(ctx, idFlag, publisher, cancel) - if err != nil { - return err + ctx, sd := shutdown.WithShutdown(ctx) + defer sd.Shutdown() + + if manager == nil { + service, err := initFunc(ctx, id, publisher, sd.Shutdown) + if err != nil { + return err + } + plugin.Register(&plugin.Registration{ + Type: plugin.TTRPCPlugin, + ID: "task", + Requires: []plugin.Type{ + plugin.EventPlugin, + }, + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return taskService{service}, nil + }, + }) + manager = shimToManager{ + shim: service, + name: name, + } } + // Handle explicit actions switch action { case "delete": - logger := logrus.WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "namespace": namespaceFlag, }) - go handleSignals(ctx, logger, signals) - response, err := service.Cleanup(ctx) + go reap(ctx, logger, signals) + ss, err := manager.Stop(ctx, id) if err != nil { return err } - data, err := proto.Marshal(response) + data, err := proto.Marshal(&shimapi.DeleteResponse{ + Pid: uint32(ss.Pid), + ExitStatus: uint32(ss.ExitStatus), + ExitedAt: ss.ExitedAt, + }) if err != nil { return err } @@ -229,12 +324,12 @@ func run(id string, initFunc Init, config Config) error { return nil case "start": opts := StartOpts{ - ID: idFlag, ContainerdBinary: containerdBinaryFlag, Address: addressFlag, TTRPCAddress: ttrpcAddress, } - address, err := service.StartShim(ctx, opts) + + address, err := manager.Start(ctx, id, opts) if err != nil { return err } @@ -242,46 +337,120 @@ func run(id string, initFunc Init, config Config) error { return err } return nil - default: - if !config.NoSetupLogger { - if err := setLogger(ctx, idFlag); err != nil { - return err - } + } + + if !config.NoSetupLogger { + ctx, err = setLogger(ctx, id) + if err != nil { + return err } - client := NewShimClient(ctx, service, signals) - if err := client.Serve(); err != nil { - if err != context.Canceled { - return err + } + + plugin.Register(&plugin.Registration{ + Type: plugin.InternalPlugin, + ID: "shutdown", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return sd, nil + }, + }) + + // Register event plugin + plugin.Register(&plugin.Registration{ + Type: plugin.EventPlugin, + ID: "publisher", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + return publisher, nil + }, + }) + + var ( + initialized = plugin.NewPluginSet() + ttrpcServices = []ttrpcService{} + ) + plugins := plugin.Graph(func(*plugin.Registration) bool { return false }) + for _, p := range plugins { + id := p.URI() + log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id) + + initContext := 
plugin.NewContext( + ctx, + p, + initialized, + // NOTE: Root is empty since the shim does not support persistent storage, + // shim plugins should make use state directory for writing files to disk. + // The state directory will be destroyed when the shim if cleaned up or + // on reboot + "", + bundlePath, + ) + initContext.Address = addressFlag + initContext.TTRPCAddress = ttrpcAddress + + // load the plugin specific configuration if it is provided + //TODO: Read configuration passed into shim, or from state directory? + //if p.Config != nil { + // pc, err := config.Decode(p) + // if err != nil { + // return nil, err + // } + // initContext.Config = pc + //} + + result := p.Init(initContext) + if err := initialized.Add(result); err != nil { + return fmt.Errorf("could not add plugin result to plugin set: %w", err) + } + + instance, err := result.Instance() + if err != nil { + if plugin.IsSkipPlugin(err) { + log.G(ctx).WithError(err).WithField("type", p.Type).Infof("skip loading plugin %q...", id) + } else { + log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id) } + continue } - // NOTE: If the shim server is down(like oom killer), the address - // socket might be leaking. - if address, err := ReadAddress("address"); err == nil { - _ = RemoveSocket(address) + if src, ok := instance.(ttrpcService); ok { + logrus.WithField("id", id).Debug("registering ttrpc service") + ttrpcServices = append(ttrpcServices, src) + } + } + + server, err := newServer() + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + return fmt.Errorf("failed creating server: %w", err) + } + + for _, srv := range ttrpcServices { + if err := srv.RegisterTTRPC(server); err != nil { + return fmt.Errorf("failed to register service: %w", err) } + } - select { - case <-publisher.Done(): - return nil - case <-time.After(5 * time.Second): - return errors.New("publisher not closed") + if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != shutdown.ErrShutdown { + return err } } -} -// NewShimClient creates a new shim server client -func NewShimClient(ctx context.Context, svc shimapi.TaskService, signals chan os.Signal) *Client { - s := &Client{ - service: svc, - context: ctx, - signals: signals, + // NOTE: If the shim server is down(like oom killer), the address + // socket might be leaking. 
+ if address, err := ReadAddress("address"); err == nil { + _ = RemoveSocket(address) + } + + select { + case <-publisher.Done(): + return nil + case <-time.After(5 * time.Second): + return errors.New("publisher not closed") } - return s } -// Serve the shim server -func (s *Client) Serve() error { +// serve serves the ttrpc API over a unix socket in the current working directory +// and blocks until the context is canceled +func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, shutdown func()) error { dump := make(chan os.Signal, 32) setupDumpStacks(dump) @@ -289,18 +458,19 @@ func (s *Client) Serve() error { if err != nil { return err } - server, err := newServer() - if err != nil { - return errors.Wrap(err, "failed creating server") - } - logrus.Debug("registering ttrpc server") - shimapi.RegisterTaskService(server, s.service) - - if err := serve(s.context, server, socketFlag); err != nil { + l, err := serveListener(socketFlag) + if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error return err } - logger := logrus.WithFields(logrus.Fields{ + go func() { + defer l.Close() + if err := server.Serve(ctx, l); err != nil && + !strings.Contains(err.Error(), "use of closed network connection") { + log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure") + } + }() + logger := log.G(ctx).WithFields(logrus.Fields{ "pid": os.Getpid(), "path": path, "namespace": namespaceFlag, @@ -310,24 +480,9 @@ func (s *Client) Serve() error { dumpStacks(logger) } }() - return handleSignals(s.context, logger, s.signals) -} -// serve serves the ttrpc API over a unix socket at the provided path -// this function does not block -func serve(ctx context.Context, server *ttrpc.Server, path string) error { - l, err := serveListener(path) - if err != nil { - return err - } - go func() { - defer l.Close() - if err := server.Serve(ctx, l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("containerd-shim: ttrpc server failure") - } - }() - return nil + go handleExitSignals(ctx, logger, shutdown) + return reap(ctx, logger, signals) } func dumpStacks(logger *logrus.Entry) { diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go index 314b45cb548..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_darwin.go @@ -1,5 +1,3 @@ -// +build darwin - /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go index 4bc636280d0..fe833df01e9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_freebsd.go @@ -1,5 +1,3 @@ -// +build freebsd - /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go index a61b6420892..e2dab0931ea 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -20,6 +21,7 @@ package shim import ( "context" + "fmt" "io" "net" "os" @@ -28,7 +30,6 @@ import ( "github.com/containerd/containerd/sys/reaper" "github.com/containerd/fifo" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -59,7 +60,7 @@ func serveListener(path string) (net.Listener, error) { path = "[inherited from parent]" } else { if len(path) > socketPathLimit { - return nil, errors.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) + return nil, fmt.Errorf("%q: unix socket path too long (> %d)", path, socketPathLimit) } l, err = net.Listen("unix", path) } @@ -70,7 +71,7 @@ func serveListener(path string) (net.Listener, error) { return l, nil } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { logger.Info("starting signal loop") for { @@ -78,6 +79,8 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si case <-ctx.Done(): return ctx.Err() case s := <-signals: + // Exit signals are handled separately from this loop + // They get registered with this channel so that we can ignore such signals for short-running actions (e.g. `delete`) switch s { case unix.SIGCHLD: if err := reaper.Reap(); err != nil { @@ -89,6 +92,22 @@ func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Si } } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { + ch := make(chan os.Signal, 32) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + for { + select { + case s := <-ch: + logger.WithField("signal", s).Debugf("Caught exit signal") + cancel() + return + case <-ctx.Done(): + return + } + } +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return fifo.OpenFifoDup2(ctx, "log", unix.O_WRONLY, 0700, int(os.Stderr.Fd())) } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go index 7339eb2a2e4..4b098ab1630 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
@@ -20,12 +18,12 @@ package shim import ( "context" + "errors" "io" "net" "os" "github.com/containerd/ttrpc" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -48,10 +46,13 @@ func serveListener(path string) (net.Listener, error) { return nil, errors.New("not supported") } -func handleSignals(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { +func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error { return errors.New("not supported") } +func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) { +} + func openLog(ctx context.Context, _ string) (io.Writer, error) { return nil, errors.New("not supported") } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go index 52bfaa9ce71..28ac9d1e799 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go @@ -19,26 +19,32 @@ package shim import ( "bytes" "context" + "errors" "fmt" - "io/ioutil" "net" "os" - "os/exec" "path/filepath" "strings" - "sync" "time" "github.com/containerd/containerd/namespaces" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" ) -var runtimePaths sync.Map +type CommandConfig struct { + Runtime string + Address string + TTRPCAddress string + Path string + SchedCore bool + Args []string + Opts *types.Any +} // Command returns the shim command with the provided args and configuration -func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAddress, path string, opts *types.Any, cmdArgs ...string) (*exec.Cmd, error) { +func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err @@ -49,67 +55,23 @@ func Command(ctx context.Context, runtime, containerdAddress, containerdTTRPCAdd } args := []string{ "-namespace", ns, - "-address", containerdAddress, + "-address", config.Address, "-publish-binary", self, } - args = append(args, cmdArgs...) - name := BinaryName(runtime) - if name == "" { - return nil, fmt.Errorf("invalid runtime name %s, correct runtime name should format like io.containerd.runc.v1", runtime) - } - - var cmdPath string - cmdPathI, cmdPathFound := runtimePaths.Load(name) - if cmdPathFound { - cmdPath = cmdPathI.(string) - } else { - var lerr error - binaryPath := BinaryPath(runtime) - if _, serr := os.Stat(binaryPath); serr == nil { - cmdPath = binaryPath - } - - if cmdPath == "" { - if cmdPath, lerr = exec.LookPath(name); lerr != nil { - if eerr, ok := lerr.(*exec.Error); ok { - if eerr.Err == exec.ErrNotFound { - // LookPath only finds current directory matches based on - // the callers current directory but the caller is not - // likely in the same directory as the containerd - // executables. Instead match the calling binaries path - // (containerd) and see if they are side by side. If so - // execute the shim found there. 
- testPath := filepath.Join(filepath.Dir(self), name) - if _, serr := os.Stat(testPath); serr == nil { - cmdPath = testPath - } - if cmdPath == "" { - return nil, errors.Wrapf(os.ErrNotExist, "runtime %q binary not installed %q", runtime, name) - } - } - } - } - } - cmdPath, err = filepath.Abs(cmdPath) - if err != nil { - return nil, err - } - if cmdPathI, cmdPathFound = runtimePaths.LoadOrStore(name, cmdPath); cmdPathFound { - // We didn't store cmdPath we loaded an already cached value. Use it. - cmdPath = cmdPathI.(string) - } - } - - cmd := exec.Command(cmdPath, args...) - cmd.Dir = path + args = append(args, config.Args...) + cmd := exec.CommandContext(ctx, config.Runtime, args...) + cmd.Dir = config.Path cmd.Env = append( os.Environ(), "GOMAXPROCS=2", - fmt.Sprintf("%s=%s", ttrpcAddressEnv, containerdTTRPCAddress), + fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress), ) + if config.SchedCore { + cmd.Env = append(cmd.Env, "SCHED_CORE=1") + } cmd.SysProcAttr = getSysProcAttr() - if opts != nil { - d, err := proto.Marshal(opts) + if config.Opts != nil { + d, err := proto.Marshal(config.Opts) if err != nil { return nil, err } @@ -196,7 +158,7 @@ func ReadAddress(path string) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go index f956b0986bd..4e2309a8069 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -29,10 +30,9 @@ import ( "syscall" "time" + "github.com/containerd/containerd/defaults" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/sys" - "github.com/pkg/errors" ) const ( @@ -53,16 +53,16 @@ func AdjustOOMScore(pid int) error { parent := os.Getppid() score, err := sys.GetOOMScoreAdj(parent) if err != nil { - return errors.Wrap(err, "get parent OOM score") + return fmt.Errorf("get parent OOM score: %w", err) } shimScore := score + 1 if err := sys.AdjustOOMScore(pid, shimScore); err != nil { - return errors.Wrap(err, "set shim OOM score") + return fmt.Errorf("set shim OOM score: %w", err) } return nil } -const socketRoot = "/run/containerd" +const socketRoot = defaults.DefaultStateDir // SocketAddress returns a socket address func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { @@ -76,7 +76,7 @@ func SocketAddress(ctx context.Context, socketPath, id string) (string, error) { // AnonDialer returns a dialer for a socket func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { - return dialer.Dialer(socket(address).path(), timeout) + return net.DialTimeout("unix", socket(address).path(), timeout) } // AnonReconnectDialer returns a dialer for an existing socket on reconnection @@ -90,19 +90,25 @@ func NewSocket(address string) (*net.UnixListener, error) { sock = socket(address) path = sock.path() ) - if !sock.isAbstract() { + + isAbstract := sock.isAbstract() + + if !isAbstract { if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { - return nil, errors.Wrapf(err, "%s", path) + return nil, fmt.Errorf("%s: %w", path, err) } } l, err := net.Listen("unix", path) if err != nil { return nil, err } - if err := os.Chmod(path, 0600); err 
!= nil { - os.Remove(sock.path()) - l.Close() - return nil, err + + if !isAbstract { + if err := os.Chmod(path, 0600); err != nil { + os.Remove(sock.path()) + l.Close() + return nil, err + } } return l.(*net.UnixListener), nil } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go index 325c290043f..b9042841b1a 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_windows.go @@ -18,13 +18,13 @@ package shim import ( "context" + "fmt" "net" "os" "syscall" "time" winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" ) const shimBinaryFormat = "containerd-shim-%s-%s.exe" @@ -40,9 +40,9 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error c, err := winio.DialPipeContext(ctx, address) if os.IsNotExist(err) { - return nil, errors.Wrap(os.ErrNotExist, "npipe not found on reconnect") + return nil, fmt.Errorf("npipe not found on reconnect: %w", os.ErrNotExist) } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } else if err != nil { return nil, err } @@ -65,14 +65,14 @@ func AnonDialer(address string, timeout time.Duration) (net.Conn, error) { if os.IsNotExist(err) { select { case <-serveTimer.C: - return nil, errors.Wrap(os.ErrNotExist, "pipe not found before timeout") + return nil, fmt.Errorf("pipe not found before timeout: %w", os.ErrNotExist) default: // Wait 10ms for the shim to serve and try again. time.Sleep(10 * time.Millisecond) continue } } else if err == context.DeadlineExceeded { - return nil, errors.Wrapf(err, "timed out waiting for npipe %s", address) + return nil, fmt.Errorf("timed out waiting for npipe %s: %w", address, err) } return nil, err } diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go index 28d6c2cabc6..73a57013ff1 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/sys/epoll.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go index db3cf702f49..a71a9cd7e93 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/sys/fds.go @@ -1,3 +1,4 @@ +//go:build !windows && !darwin // +build !windows,!darwin /* @@ -19,14 +20,14 @@ package sys import ( - "io/ioutil" + "os" "path/filepath" "strconv" ) // GetOpenFds returns the number of open fds for the process provided by pid func GetOpenFds(pid int) (int, error) { - dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) + dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) if err != nil { return -1, err } diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index d8329af9fbc..805a7a736fa 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 
a9198ef399a..87ebacc2006 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. @@ -19,6 +17,7 @@ package sys import ( + "fmt" "os" "path/filepath" "regexp" @@ -29,7 +28,6 @@ import ( "unsafe" "github.com/Microsoft/hcsshim" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -270,7 +268,7 @@ func ForceRemoveAll(path string) error { snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { if err := cleanupWCOWLayers(snapshotDir); err != nil { - return errors.Wrapf(err, "failed to cleanup WCOW layers in %s", snapshotDir) + return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) } } @@ -280,12 +278,22 @@ func ForceRemoveAll(path string) error { func cleanupWCOWLayers(root string) error { // See snapshots/windows/windows.go getSnapshotDir() var layerNums []int + var rmLayerNums []int if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if path != root && info.IsDir() { - if layerNum, err := strconv.Atoi(filepath.Base(path)); err == nil { - layerNums = append(layerNums, layerNum) + name := filepath.Base(path) + if strings.HasPrefix(name, "rm-") { + layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) + if err != nil { + return err + } + rmLayerNums = append(rmLayerNums, layerNum) } else { - return err + layerNum, err := strconv.Atoi(name) + if err != nil { + return err + } + layerNums = append(layerNums, layerNum) } return filepath.SkipDir } @@ -295,8 +303,14 @@ func cleanupWCOWLayers(root string) error { return err } - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) + sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) + for _, rmLayerNum := range rmLayerNums { + if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { + return err + } + } + sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) for _, layerNum := range layerNums { if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { return err @@ -311,19 +325,20 @@ func cleanupWCOWLayer(layerPath string) error { HomeDir: filepath.Dir(layerPath), } - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared. + // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. + // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. 
if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || hcserror.Err != windows.ERROR_DEV_NOT_EXIST { - return errors.Wrapf(err, "failed to unprepare %s", layerPath) + if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { + return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) } } if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to deactivate %s", layerPath) + return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) } if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return errors.Wrapf(err, "failed to destroy %s", layerPath) + return fmt.Errorf("failed to destroy %s: %w", layerPath, err) } return nil diff --git a/vendor/github.com/containerd/containerd/sys/mount_linux.go b/vendor/github.com/containerd/containerd/sys/mount_linux.go deleted file mode 100644 index a21045529ae..00000000000 --- a/vendor/github.com/containerd/containerd/sys/mount_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "runtime" - "syscall" - "unsafe" - - "github.com/containerd/containerd/log" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// FMountat performs mount from the provided directory. 
-func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { - var ( - sourceP, targetP, fstypeP, dataP *byte - pid uintptr - err error - errno, status syscall.Errno - ) - - sourceP, err = syscall.BytePtrFromString(source) - if err != nil { - return err - } - - targetP, err = syscall.BytePtrFromString(target) - if err != nil { - return err - } - - fstypeP, err = syscall.BytePtrFromString(fstype) - if err != nil { - return err - } - - if data != "" { - dataP, err = syscall.BytePtrFromString(data) - if err != nil { - return err - } - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return errors.Wrap(err, "failed to open pipe") - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - - pid, errno = forkAndMountat(dirfd, - uintptr(unsafe.Pointer(sourceP)), - uintptr(unsafe.Pointer(targetP)), - uintptr(unsafe.Pointer(fstypeP)), - flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) - - if errno != 0 { - return errors.Wrap(errno, "failed to fork thread") - } - - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } - - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() - - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) - if errno != 0 { - return errors.Wrap(errno, "failed to read pipe") - } - - if status != 0 { - return errors.Wrap(status, "failed to mount") - } - - return nil -} - -// forkAndMountat will fork thread, change working dir and mount. -// -// precondition: the runtime OS thread must be locked. -func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - - // block signal during clone - beforeFork() - - // the cloned thread shares the open file descriptor, but the thread - // never be reused by runtime. - pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) - if errno != 0 || pid != 0 { - // restore all signals - afterFork() - return - } - - // restore all signals - afterForkInChild() - - // change working dir - _, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0) - if errno != 0 { - goto childerr - } - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) - -childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/containerd/containerd/sys/oom_linux.go b/vendor/github.com/containerd/containerd/sys/oom_linux.go index 82a347c6f72..bb2a3eafb4f 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_linux.go +++ b/vendor/github.com/containerd/containerd/sys/oom_linux.go @@ -18,7 +18,6 @@ package sys import ( "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -69,7 +68,7 @@ func SetOOMScore(pid, score int) error { // no oom score is set, or a sore is set to 0. 
func GetOOMScoreAdj(pid int) (int, error) { path := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go index f5d7e9786b8..fa0db5a10e0 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 0033178dfa7..6c4f13b9088 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,12 +20,14 @@ package reaper import ( - "os/exec" + "errors" + "fmt" "sync" + "syscall" "time" runc "github.com/containerd/go-runc" - "github.com/pkg/errors" + exec "golang.org/x/sys/execabs" "golang.org/x/sys/unix" ) @@ -115,6 +118,38 @@ func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) { return -1, ErrNoSuchProcess } +// WaitTimeout is used to skip the blocked command and kill the left process. +func (m *Monitor) WaitTimeout(c *exec.Cmd, ec chan runc.Exit, timeout time.Duration) (int, error) { + type exitStatusWrapper struct { + status int + err error + } + + // capacity can make sure that the following goroutine will not be + // blocked if there is no receiver when timeout. + waitCh := make(chan *exitStatusWrapper, 1) + go func() { + defer close(waitCh) + + status, err := m.Wait(c, ec) + waitCh <- &exitStatusWrapper{ + status: status, + err: err, + } + }() + + timer := time.NewTimer(timeout) + defer timer.Stop() + + select { + case <-timer.C: + syscall.Kill(c.Process.Pid, syscall.SIGKILL) + return 0, fmt.Errorf("timeout %v for cmd(pid=%d): %s, %s", timeout, c.Process.Pid, c.Path, c.Args) + case res := <-waitCh: + return res.status, res.err + } +} + // Subscribe to process exit changes func (m *Monitor) Subscribe() chan runc.Exit { c := make(chan runc.Exit, bufferSize) diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index b67cc1fa3a7..367e19cad84 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -19,11 +20,11 @@ package sys import ( + "fmt" "net" "os" "path/filepath" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -31,7 +32,7 @@ import ( func CreateUnixSocket(path string) (net.Listener, error) { // BSDs have a 104 limit if len(path) > 104 { - return nil, errors.Errorf("%q: unix socket path too long (> 104)", path) + return nil, fmt.Errorf("%q: unix socket path too long (> 104)", path) } if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go index 3ee7679b49b..1ae12bc5114 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_windows.go +++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go deleted file mode 100644 index 4f03cd6cb0f..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_bsd.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the access time from a stat struct -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atimespec -} - -// StatCtime returns the created time from a stat struct -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctimespec -} - -// StatMtime returns the modified time from a stat struct -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtimespec -} - -// StatATimeAsTime returns the access time as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go b/vendor/github.com/containerd/containerd/sys/stat_openbsd.go deleted file mode 100644 index ec3b9df69aa..00000000000 --- a/vendor/github.com/containerd/containerd/sys/stat_openbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sys - -import ( - "syscall" - "time" -) - -// StatAtime returns the Atim -func StatAtime(st *syscall.Stat_t) syscall.Timespec { - return st.Atim -} - -// StatCtime returns the Ctim -func StatCtime(st *syscall.Stat_t) syscall.Timespec { - return st.Ctim -} - -// StatMtime returns the Mtim -func StatMtime(st *syscall.Stat_t) syscall.Timespec { - return st.Mtim -} - -// StatATimeAsTime returns st.Atim as a time.Time -func StatATimeAsTime(st *syscall.Stat_t) time.Time { - // The int64 conversions ensure the line compiles for 32-bit systems as well. - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b8200307f3a..07e9f9ccc2c 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. 
Filled in at linking time. - Version = "1.5.8+unknown" + Version = "1.6.2+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 708b2668990..9ee97fc9110 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,6 +26,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "errors" "fmt" "io" "io/ioutil" @@ -38,7 +39,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -142,7 +142,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { - rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err) + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } }() @@ -307,7 +307,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri // Import tar file. intar, err := importTar(in) if err != nil { - return nil, errors.Wrap(err, "failed to sort") + return nil, fmt.Errorf("failed to sort: %w", err) } // Sort the tar file respecting to the prioritized files list. @@ -318,7 +318,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri *missedPrioritized = append(*missedPrioritized, l) continue // allow not found } - return nil, errors.Wrap(err, "failed to sort tar entries") + return nil, fmt.Errorf("failed to sort tar entries: %w", err) } } if len(prioritized) == 0 { @@ -371,7 +371,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} pw, err := newCountReader(in) if err != nil { - return nil, errors.Wrap(err, "failed to make position watcher") + return nil, fmt.Errorf("failed to make position watcher: %w", err) } tr := tar.NewReader(pw) @@ -383,7 +383,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) { if err == io.EOF { break } else { - return nil, errors.Wrap(err, "failed to parse tar file") + return nil, fmt.Errorf("failed to parse tar file, %w", err) } } switch cleanEntryName(h.Name) { @@ -420,7 +420,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error { _, okIn := in.get(name) _, okOut := out.get(name) if !okIn && !okOut { - return errors.Wrapf(errNotFound, "file: %q", name) + return fmt.Errorf("file: %q: %w", name, errNotFound) } parent, _ := path.Split(strings.TrimSuffix(name, "/")) diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 3ef02911607..4b655c14532 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -27,6 +27,7 @@ import ( "bytes" "compress/gzip" "crypto/sha256" + "errors" "fmt" "hash" "io" @@ -40,7 +41,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/vbatts/tar-split/archive/tar" ) @@ -107,7 +107,7 @@ type Telemetry struct { } // Open opens a stargz file for reading. -// The behaviour is configurable using options. +// The behavior is configurable using options. 
// // Note that each entry name is normalized as the path that is relative to root. func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { @@ -118,7 +118,7 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { } } - gzipCompressors := []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} decompressors := append(gzipCompressors, opts.decompressors...) // Determine the size to fetch. Try to fetch as many bytes as possible. @@ -184,7 +184,7 @@ func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr e return 0, 0, fmt.Errorf("error reading footer: %v", err) } var allErr []error - for _, d := range []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} { + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { fSize := d.FooterSize() fOffset := positive(int64(len(footer)) - fSize) _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) @@ -279,12 +279,12 @@ func (r *Reader) initFields() error { pdir := r.getOrCreateDir(pdirName) ent.NumLink++ // at least one name(ent.Name) references this entry. if ent.Type == "hardlink" { - if org, ok := r.m[cleanEntryName(ent.LinkName)]; ok { - org.NumLink++ // original entry is referenced by this ent.Name. - ent = org - } else { - return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName) + org, err := r.getSource(ent) + if err != nil { + return err } + org.NumLink++ // original entry is referenced by this ent.Name. + ent = org } pdir.addChild(path.Base(name), ent) } @@ -303,6 +303,20 @@ func (r *Reader) initFields() error { return nil } +func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) { + if ent.Type == "hardlink" { + org, ok := r.m[cleanEntryName(ent.LinkName)] + if !ok { + return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName) + } + ent, err = r.getSource(org) + if err != nil { + return nil, err + } + } + return ent, nil +} + func parentDir(p string) string { dir, _ := path.Split(p) return strings.TrimSuffix(dir, "/") @@ -371,8 +385,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.Digest != "" { d, err := digest.Parse(e.Digest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse regular file digest %q", e.Digest) + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) } regDigestMap[e.Offset] = d } else { @@ -387,8 +400,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) { if e.ChunkDigest != "" { d, err := digest.Parse(e.ChunkDigest) if err != nil { - return nil, errors.Wrapf(err, - "failed to parse chunk digest %q", e.ChunkDigest) + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) } chunkDigestMap[e.Offset] = d } else { @@ -464,7 +476,11 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { } e, ok = r.m[path] if ok && e.Type == "hardlink" { - e, ok = r.m[e.LinkName] + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } } return } @@ -629,7 +645,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { } blobPayloadSize, _, _, err := c.ParseFooter(footer) if err != nil { - return nil, errors.Wrapf(err, "failed to parse footer") + return nil, fmt.Errorf("failed to parse footer: %w", err) } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } diff --git 
a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 88e1283d853..591d7a62e11 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -34,34 +34,37 @@ import ( "strconv" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type gzipCompression struct { - *gzipCompressor + *GzipCompressor *GzipDecompressor } func newGzipCompressionWithLevel(level int) Compression { return &gzipCompression{ - &gzipCompressor{level}, + &GzipCompressor{level}, &GzipDecompressor{}, } } -func NewGzipCompressorWithLevel(level int) Compressor { - return &gzipCompressor{level} +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} } -type gzipCompressor struct { +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { compressionLevel int } -func (gc *gzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { +func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, gc.compressionLevel) } -func (gc *gzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { tocJSON, err := json.MarshalIndent(toc, "", "\t") if err != nil { return "", err @@ -146,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t } tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } @@ -155,23 +158,27 @@ func (gz *GzipDecompressor) FooterSize() int64 { return FooterSize } -type legacyGzipDecompressor struct{} +func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +type LegacyGzipDecompressor struct{} -func (gz *legacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { +func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) } -func (gz *legacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { +func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { return parseTOCEStargz(r) } -func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { +func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { if len(p) != legacyFooterSize { return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p)) } zr, err := gzip.NewReader(bytes.NewReader(p)) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader") + return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() extra := zr.Header.Extra @@ -183,34 +190,48 @@ func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff } tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) if err != nil { - return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset") + return 0, 0, 0, 
fmt.Errorf("legacy: failed to parse toc offset: %w", err) } return tocOffset, tocOffset, 0, nil } -func (gz *legacyGzipDecompressor) FooterSize() int64 { +func (gz *LegacyGzipDecompressor) FooterSize() int64 { return legacyFooterSize } +func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { zr, err := gzip.NewReader(r) if err != nil { - return nil, "", fmt.Errorf("malformed TOC gzip header: %v", err) + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) } - defer zr.Close() zr.Multistream(false) tr := tar.NewReader(zr) h, err := tr.Next() if err != nil { - return nil, "", fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) } if h.Name != TOCTarName { - return nil, "", fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) } - dgstr := digest.Canonical.Digester() - toc = new(JTOC) - if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { - return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) - } - return toc, dgstr.Digest(), nil + return readCloser{tr, zr.Close}, nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 9224e456dde..1de13a4705b 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -28,6 +28,7 @@ import ( "compress/gzip" "crypto/sha256" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -41,7 +42,6 @@ import ( "github.com/containerd/stargz-snapshotter/estargz/errorutil" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) // TestingController is Compression with some helper methods necessary for testing. 
@@ -1062,18 +1062,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT fSize := controller.FooterSize() footer := make([]byte, fSize) if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { - return nil, 0, errors.Wrap(err, "error reading footer") + return nil, 0, fmt.Errorf("error reading footer: %w", err) } _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) if err != nil { - return nil, 0, errors.Wrapf(err, "failed to parse footer") + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) } // Decode the TOC JSON tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { - return nil, 0, errors.Wrap(err, "failed to parse TOC") + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) } return decodedJTOC, tocOffset, nil } diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 8bad5b1115e..842076fa0d6 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -6,7 +6,7 @@ env: #### Global variables used for all tasks #### # Name of the ultimate destination branch for this CI run, PR or post-merge. - DEST_BRANCH: "release-1.23" + DEST_BRANCH: "main" GOPATH: "/var/tmp/go" GOSRC: "${GOPATH}/src/github.com/containers/buildah" # Overrides default location (/tmp/cirrus) for repo clone @@ -19,22 +19,22 @@ env: CIRRUS_CLONE_DEPTH: 50 # Unless set by in_podman.sh, default to operating outside of a podman container IN_PODMAN: 'false' + # root or rootless + PRIV_NAME: root #### #### Cache-image names to test with #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" - UBUNTU_NAME: "ubuntu-2104" - PRIOR_UBUNTU_NAME: "ubuntu-2010" + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" + UBUNTU_NAME: "ubuntu-2110" - IMAGE_SUFFIX: "c6248193773010944" + IMAGE_SUFFIX: "c4764556961513472" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" - PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}" IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}" @@ -76,7 +76,6 @@ meta_task: ${FEDORA_CACHE_IMAGE_NAME} ${PRIOR_FEDORA_CACHE_IMAGE_NAME} ${UBUNTU_CACHE_IMAGE_NAME} - ${PRIOR_UBUNTU_CACHE_IMAGE_NAME} BUILDID: "${CIRRUS_BUILD_ID}" REPOREF: "${CIRRUS_CHANGE_IN_REPO}" GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14] @@ -131,13 +130,35 @@ vendor_task: - './hack/tree_status.sh' +# Confirm cross-compile ALL architectures on a Mac OS-X VM. 
+cross_build_task: + name: "Cross Compile" + alias: cross_build + only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' + + osx_instance: + image: 'big-sur-base' + + script: + - brew update + - brew install go + - brew install go-md2man + - brew install gpgme + - go version + - make cross CGO_ENABLED=0 + + binary_artifacts: + path: ./bin/* + + unit_task: name: 'Unit tests w/ $STORAGE_DRIVER' alias: unit - only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' - depends_on: + only_if: *not_docs + depends_on: &smoke_vendor_cross - smoke - vendor + - cross_build timeout_in: 1h @@ -159,8 +180,7 @@ conformance_task: name: 'Build Conformance w/ $STORAGE_DRIVER' alias: conformance only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross gce_instance: image_name: "${UBUNTU_CACHE_IMAGE_NAME}" @@ -177,85 +197,11 @@ conformance_task: conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}' -# Confirm cross-compile ALL architectures on a Mac OS-X VM. -cross_build_task: - name: "Cross Compile" - alias: cross_build - only_if: *not_docs - depends_on: - - unit - - osx_instance: - image: 'big-sur-base' - - script: - - brew update - - brew install go - - brew install go-md2man - - brew install gpgme - - go version - - make cross CGO_ENABLED=0 - - binary_artifacts: - path: ./bin/* - - -static_build_task: - name: "Static Build" - alias: static_build - only_if: *not_docs - depends_on: - - unit - - gce_instance: - image_name: "${FEDORA_CACHE_IMAGE_NAME}" - cpu: 8 - memory: 12 - disk: 200 - - env: - NIX_FQIN: "docker.io/nixos/nix:latest" - - init_script: | - set -ex - setenforce 0 - growpart /dev/sda 1 || true - resize2fs /dev/sda1 || true - yum -y install podman - - nix_cache: - folder: '.cache' - fingerprint_script: cat nix/* - - build_script: | - set -ex - mkdir -p .cache - mv .cache /nix - if [[ -z $(ls -A /nix) ]]; then - podman run --rm --privileged -i -v /:/mnt \ - $NIX_FQIN \ - cp -rfT /nix /mnt/nix - fi - podman run --rm --privileged -i -v /nix:/nix \ - -v ${PWD}:${PWD} -w ${PWD} \ - $NIX_FQIN \ - nix --print-build-logs --option cores 8 \ - --option max-jobs 8 build --file nix/ - - binaries_artifacts: - path: "result/bin/buildah" - - save_cache_script: | - mv /nix .cache - chown -Rf $(whoami) .cache - - integration_task: name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER" alias: integration only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross matrix: # VFS @@ -271,10 +217,6 @@ integration_task: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' - - env: - DISTRO_NV: "${PRIOR_UBUNTU_NAME}" - IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'vfs' # OVERLAY - env: DISTRO_NV: "${FEDORA_NAME}" @@ -288,10 +230,6 @@ integration_task: DISTRO_NV: "${UBUNTU_NAME}" IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' - - env: - DISTRO_NV: "${PRIOR_UBUNTU_NAME}" - IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}" - STORAGE_DRIVER: 'overlay' gce_instance: image_name: "$IMAGE_NAME" @@ -314,13 +252,50 @@ integration_task: package_versions_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh packages' golang_version_script: '$GOSRC/$SCRIPT_BASE/logcollector.sh golang' +integration_rootless_task: + name: "Integration rootless $DISTRO_NV w/ $STORAGE_DRIVER" + alias: integration_rootless + only_if: *not_docs + depends_on: *smoke_vendor_cross + + matrix: + # Running rootless tests on overlay + # OVERLAY + - env: + DISTRO_NV: "${FEDORA_NAME}" + IMAGE_NAME: "${FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 
'overlay' + PRIV_NAME: rootless + - env: + DISTRO_NV: "${PRIOR_FEDORA_NAME}" + IMAGE_NAME: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + - env: + DISTRO_NV: "${UBUNTU_NAME}" + IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}" + STORAGE_DRIVER: 'overlay' + PRIV_NAME: rootless + + gce_instance: + image_name: "$IMAGE_NAME" + + # Separate scripts for separate outputs, makes debugging easier. + setup_script: '${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}' + build_script: '${SCRIPT_BASE}/build.sh |& ${_TIMESTAMP}' + integration_test_script: '${SCRIPT_BASE}/test.sh integration |& ${_TIMESTAMP}' + + binary_artifacts: + path: ./bin/* + + always: + <<: *standardlogs in_podman_task: name: "Containerized Integration" alias: in_podman only_if: *not_docs - depends_on: - - unit + depends_on: *smoke_vendor_cross env: # This is key, cause the scripts to re-execute themselves inside a container. @@ -356,7 +331,6 @@ success_task: - cross_build - integration - in_podman - - static_build container: image: "quay.io/libpod/alpine:latest" diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore index d98205316c2..939ce6ef52d 100644 --- a/vendor/github.com/containers/buildah/.gitignore +++ b/vendor/github.com/containers/buildah/.gitignore @@ -1,11 +1,12 @@ docs/buildah*.1 +docs/*.5 /bin /buildah /imgtype /build/ -tests/tools/build +/tests/tools/build Dockerfile* !/tests/bud/*/Dockerfile* !/tests/conformance/**/Dockerfile* *.swp -result +/result/ diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml index 0c7e3100793..af0b10c76b2 100644 --- a/vendor/github.com/containers/buildah/.golangci.yml +++ b/vendor/github.com/containers/buildah/.golangci.yml @@ -7,18 +7,7 @@ run: # Don't exceed number of threads available when running under CI concurrency: 4 linters: - enable-all: true - disable: - # All these break for one reason or another - - dupl - - funlen - - gochecknoglobals - - gochecknoinits - - goconst - - gocritic - - gocyclo - - gosec - - lll - - maligned - - prealloc - - scopelint + enable: + - revive + - unconvert + - unparam diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 685c4f4d36a..82cee5080fa 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,10 +2,249 @@ # Changelog -## v1.23.1 (2021-09-27) +## v1.25.1 (2022-03-30) - Vendor containers/common v0.44.2 - post-1.23 branch fixups + buildah: create WORKDIR with USER permissions + vendor: update github.com/openshift/imagebuilder + copier: attempt to open the dir before adding it + Updated dependabot to get updates for GitHub actions. 
+ Switch most calls to filepath.Walk to filepath.WalkDir + build: allow --no-cache and --layers so build cache can be overrided + build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + Bump to v1.26.0-dev + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + +## v1.25.0 (2022-03-25) + + install: drop RHEL/CentOS 7 doc + build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 + Bump c/storage to v1.39.0 in main + Add a test for CVE-2022-27651 + build(deps): bump github.com/docker/docker + Bump github.com/prometheus/client_golang to v1.11.1 + [CI:DOCS] man pages: sort flags, and keep them that way + build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2 + Don't pollute + network setup: increase timeout to 4 minutes + do not set the inheritable capabilities + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3 + parse: convert exposed GetVolumes to internal only + buildkit: mount=type=cache support locking external cache store + .in support: improve error message when cpp is not installed + buildah image: install cpp + build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 + build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 + build(deps): bump github.com/docker/docker + Add --no-hosts flag to eliminate use of /etc/hosts within containers + test: remove skips for rootless users + test: unshare mount/umount if test is_rootless + tests/copy: read correct containers.conf + build(deps): bump github.com/docker/distribution + cirrus: add seperate task and matrix for rootless + tests: skip tests for rootless which need unshare + buildah: test rootless integration + vendor: bump c/storage to main/93ce26691863 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10 + tests/copy: initialize the network, too + [CI:DOCS] remove references to Kubic for CentOS and Ubuntu + build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1 + use c/image/pkg/blobcache + vendor c/image/v5@v5.20.0 + add: ensure the context directory is an absolute path + executor: docker builds must inherit healthconfig from base if any + docs: Remove Containerfile and containeringore + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9 + helpers.bash: Use correct syntax + speed up combination-namespaces test + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + Bump back to 1.25.0-dev + build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0 + +## v1.24.2 (2022-02-16) + + Increase subuid/subgid to 65535 + history: only add proxy vars to history if specified + run_linux: use --systemd-cgroup + buildah: new global option --cgroup-manager + Makefile: build with systemd when available + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + Bump c/common to v0.47.4 + Cirrus: Use updated VM images + conformance: add a few "replace-directory-with-symlink" tests + Bump back to v1.25.0-dev + +## v1.24.1 (2022-02-03) + + executor: Add support for inline --platform within Dockerfile + caps: fix buildah run --cap-add=all + Update vendor of openshift/imagebuilder + Bump version of containers/image and containers/common + Update vendor of containers/common + System tests: fix accidental vandalism of source dir + build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + imagebuildah.BuildDockerfiles(): create the jobs semaphore + build(deps): bump github.com/onsi/gomega 
from 1.18.0 to 1.18.1 + overlay: always honor mountProgram + overlay: move mount program invocation to separate function + overlay: move mount program lookup to separate function + Bump to v1.25.0-dev [NO TESTS NEEDED] + +## v1.24.0 (2022-01-26) + + Update vendor of containers/common + build(deps): bump github.com/golangci/golangci-lint in /tests/tools + Github-workflow: Report both failures and errors. + build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + Update docs/buildah-build.1.md + [CI:DOCS] Fix typos and improve language + buildah bud --network add support for custom networks + Make pull commands be consistent + docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + Vendor in latest containers/image + Run codespell on code + .github/dependabot.yml: add tests/tools go.mod + CI: rm git-validation, add GHA job to validate PRs + tests/tools: bump go-md2man to v2.0.1 + tests/tools/Makefile: simplify + tests/tools: bump onsi/ginkgo to v1.16.5 + vendor: bump c/common and others + mount: add support for custom upper and workdir with overlay mounts + linux: fix lookup for runtime + overlay: add MountWithOptions to API which extends support for advanced overlay + Allow processing of SystemContext from FlagSet + .golangci.yml: enable unparam linter + util/resolveName: rm bool return + tests/tools: bump golangci-lint + .gitignore: fixups + all: fix capabilities.NewPid deprecation warnings + bind/mount.go: fix linter comment + all: fix gosimple warning S1039 + tests/e2e/buildah_suite_test.go: fix gosimple warnings + imagebuildah/executor.go: fix gosimple warning + util.go: fix gosimple warning + build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + Enable git-daemon tests + Allow processing of id options from FlagSet + Cirrus: Re-order tasks for more parallelism + Cirrus: Freshen VM images + Fix platform handling for empty os/arch values + Allow processing of network options from FlagSet + Fix permissions on secrets directory + Update containers/image and containers/common + bud.bats: use a local git daemon for the git protocol test + Allow processing of common options from FlagSet + Cirrus: Run int. 
tests in parallel with unit + vendor c/common + Fix default CNI paths + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + multi-stage: enable mounting stages across each other with selinux enabled + executor: Share selinux label of first stage with other stages in a build + buildkit: add from field to bind and cache mounts so images can be used as source + Use config.ProxyEnv from containers/common + use libnetwork from c/common for networking + setup the netns in the buildah parent process + build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + build: fix libsubid test + Allow callers to replace the ContainerSuffix + parse: allow parsing anomaly non-human value for memory control group + .cirrus: remove static_build from ci + stage_executor: re-use all possible layers from cache for squashed builds + build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + Allow rootless buildah to set resource limits on cgroup V2 + build(deps): bump github.com/docker/docker + tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + Wire logger through to config + copier.Put: check for is-not-a-directory using lstat, not stat + Turn on rootless cgroupv2 tests + Grab all of the containers.conf settings for namespaces. + image: set MediaType in OCI manifests + copier: RemoveAll possibly-directories + Simple README fix + images: accept multiple filter with logical AND + build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + UPdate vendor of container/storage + build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + Make LocalIP public function so Podman can use it + Fix UnsetEnv for buildah bud + Tests should rely only on static/unchanging images + run: ensure that stdio pipes are labeled correctly + build(deps): bump github.com/docker/docker + Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + chroot: don't use the generate default seccomp filter for unit tests + build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + ssh-agent: Increase timeout before we explicitly close connection + docs/tutorials: update + Clarify that manifest defaults to localhost as the registry name + "config": remove a stray bit of debug output + "commit": fix a flag typo + Fix an error message: unlocking vs locking + Expand the godoc for CommonBuildOptions.Secrets + chroot: accept an "rw" option + Add --unsetenv option to buildah commit and build + define.TempDirForURL(): show CombinedOutput when a command fails + config: support the variant field + rootless: do not bind mount /sys if not needed + Fix tutorial to specify command on buildah run line + build: history should not contain ARG values + docs: Use guaranteed path for go-md2man + run: honor --network=none from builder if nothing specified + networkpolicy: Should be enabled instead of default when explictly set + Add support for env var secret sources + build(deps): bump github.com/docker/docker + fix: another non-portable shebang + Rootless containers users should use additional groups + Support overlayfs path contains colon + Report ignorefile location when no content added + Add support for host.containers.internal in the /etc/hosts + build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + imagebuildah: fix nil deref + buildkit: add 
support for mount=type=cache + Default secret mode to 400 + [CI:DOCS] Include manifest example usage + docs: update buildah-from, buildah-pull 'platform' option compatibility notes + docs: update buildah-build 'platform' option compatibility notes + De-dockerize the man page as much as possible + [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + docs: Fix and Update Containerfile man page with supported mount types + mount: add tmpcopyup to tmpfs mount option + buildkit: Add support for --mount=type=tmpfs + build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + Fix command doc links in README.md + build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + build: Add support for buildkit like --mount=type=bind + Bump containerd to v1.5.7 + build(deps): bump github.com/docker/docker + tests: stop pulling php, composer + Fix .containerignore link file + Cirrus: Fix defunct package metadata breaking cache + build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + buildah build: add --all-platforms + Add man page for Containerfile and .containerignore + Plumb the remote logger throughut Buildah + Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + Run: Cleanup run directory after every RUN step + build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0 + Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + Makefile: check for `-race` using `-mod=vendor` + imagebuildah: fix an attempt to write to a nil map + push: support to specify the compression format + conformance: allow test cases to specify dockerUseBuildKit + build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + unmarshalConvertedConfig(): handle zstd compression + tests/copy/copy: wire up compression options + Update to github.com/vbauerster/mpb v7.1.5 + Add flouthoc to OWNERS + build: Add additional step nodes when labels are modified + Makefile: turn on race detection whenever it's available + conformance: add more tests for exclusion short-circuiting + Update VM Images + Drop prior-ubuntu testing + Bump to v1.24.0-dev ## v1.23.0 (2021-09-13) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 27e4ade6d67..003d209ff09 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -3,7 +3,7 @@ export GOPROXY=https://proxy.golang.org APPARMORTAG := $(shell hack/apparmor_tag.sh) STORAGETAGS := $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) -TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) +TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) BUILDTAGS += $(TAGS) PREFIX := /usr/local BINDIR := $(PREFIX)/bin @@ -12,6 +12,8 @@ BUILDFLAGS := -tags "$(BUILDTAGS)" BUILDAH := buildah GO := go +GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi) +GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi) GO110 := 1.10 GOVERSION := $(findstring $(GO110),$(shell go version)) # test for go module support @@ -22,6 +24,7 @@ else export GO_BUILD=$(GO) build export GO_TEST=$(GO) test endif +RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race) GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git 
rev-parse --short HEAD),$(error "git failed")) SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed")) @@ -33,8 +36,8 @@ RUNC_COMMIT := v1.0.0-rc8 LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= -BUILDAH_LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go docker/*.go manifests/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= @@ -65,14 +68,15 @@ static: cp -rfp ./result/bin/* ./bin/ bin/buildah: $(SOURCES) cmd/buildah/*.go - $(GO_BUILD) $(BUILDAH_LDFLAGS) -gcflags "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah + $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah .PHONY: buildah buildah: bin/buildah -LINUX_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^linux/))) -DARWIN_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^darwin/))) -WINDOWS_CROSS_TARGETS = $(addsuffix .exe,$(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^windows/)))) +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) +LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) +DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) +WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) .PHONY: cross cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS) @@ -107,7 +111,6 @@ codespell: .PHONY: validate validate: install.tools ./tests/validate/whitespace.sh - ./tests/validate/git-validation.sh ./hack/xref-helpmsgs-manpages ./tests/validate/pr-should-include-tests ./tests/validate/buildahimages-are-sane @@ -164,14 +167,14 @@ test-integration: install.tools cd tests; ./test_runner.sh tests/testreport/testreport: tests/testreport/testreport.go - $(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go + $(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go .PHONY: test-unit test-unit: tests/testreport/testreport - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... 
| grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m tmp=$(shell mktemp -d) ; \ mkdir -p $$tmp/root $$tmp/runroot; \ - $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf + $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor diff --git a/vendor/github.com/containers/buildah/OWNERS b/vendor/github.com/containers/buildah/OWNERS index 1e070e396e6..2eb9ea84c3e 100644 --- a/vendor/github.com/containers/buildah/OWNERS +++ b/vendor/github.com/containers/buildah/OWNERS @@ -2,6 +2,7 @@ approvers: - TomSweeneyRedHat - ashley-cui - cevich + - flouthoc - giuseppe - lsm5 - nalind diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index 95c8a9a7b35..512d3f87368 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -77,7 +77,9 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh): ```bash $ cat > lighttpd.sh <<"EOF" -#!/usr/bin/env bash -x +#!/usr/bin/env bash + +set -x ctr1=$(buildah from "${1:-fedora}") @@ -103,27 +105,27 @@ $ sudo ./lighttpd.sh ## Commands | Command | Description | | ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | -| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. | -| [buildah-build(1)](/docs/buildah-build.md) | Build an image using instructions from Containerfiles or Dockerfiles. | -| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. | -| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. | -| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. | -| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. | -| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | -| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. | -| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. | -| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. | -| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. | -| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. | -| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. | -| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. | -| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. | -| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. | -| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. 
| -| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. | -| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. | -| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. | -| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information | +| [buildah-add(1)](/docs/buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. | +| [buildah-build(1)](/docs/buildah-build.1.md) | Build an image using instructions from Containerfiles or Dockerfiles. | +| [buildah-commit(1)](/docs/buildah-commit.1.md) | Create an image from a working container. | +| [buildah-config(1)](/docs/buildah-config.1.md) | Update image configuration settings. | +| [buildah-containers(1)](/docs/buildah-containers.1.md) | List the working containers and their base images. | +| [buildah-copy(1)](/docs/buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. | +| [buildah-from(1)](/docs/buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. | +| [buildah-images(1)](/docs/buildah-images.1.md) | List images in local storage. | +| [buildah-info(1)](/docs/buildah-info.1.md) | Display Buildah system information. | +| [buildah-inspect(1)](/docs/buildah-inspect.1.md) | Inspects the configuration of a container or image. | +| [buildah-mount(1)](/docs/buildah-mount.1.md) | Mount the working container's root filesystem. | +| [buildah-pull(1)](/docs/buildah-pull.1.md) | Pull an image from the specified location. | +| [buildah-push(1)](/docs/buildah-push.1.md) | Push an image from local storage to elsewhere. | +| [buildah-rename(1)](/docs/buildah-rename.1.md) | Rename a local container. | +| [buildah-rm(1)](/docs/buildah-rm.1.md) | Removes one or more working containers. | +| [buildah-rmi(1)](/docs/buildah-rmi.1.md) | Removes one or more images. | +| [buildah-run(1)](/docs/buildah-run.1.md) | Run a command inside of the container. | +| [buildah-tag(1)](/docs/buildah-tag.1.md) | Add an additional name to a local image. | +| [buildah-umount(1)](/docs/buildah-umount.1.md) | Unmount a working container's root file system. | +| [buildah-unshare(1)](/docs/buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. | +| [buildah-version(1)](/docs/buildah-version.1.md) | Display the Buildah Version Information | **Future goals include:** * more CI tests diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index f17e3a9c943..8aa53a2920f 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -47,8 +47,10 @@ type AddAndCopyOptions struct { // If the sources include directory trees, Hasher will be passed // tar-format archives of the directory trees. Hasher io.Writer - // Excludes is the contents of the .dockerignore file. + // Excludes is the contents of the .containerignore file. Excludes []string + // IgnoreFile is the path to the .containerignore file. + IgnoreFile string // ContextDir is the base directory for content being copied and // Excludes patterns. 
ContextDir string @@ -199,6 +201,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption if err != nil { return errors.Wrapf(err, "error determining current working directory") } + } else { + if !filepath.IsAbs(options.ContextDir) { + contextDir, err = filepath.Abs(options.ContextDir) + if err != nil { + return errors.Wrapf(err, "error converting context directory path %q to an absolute path", options.ContextDir) + } + } } // Figure out what sorts of sources we have. @@ -564,7 +573,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption } } if itemsCopied == 0 { - return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out)", localSourceStat.Glob, len(localSourceStat.Globbed)) + excludesFile := "" + if options.IgnoreFile != "" { + excludesFile = " using " + options.IgnoreFile + } + return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile) } } return nil @@ -642,3 +655,37 @@ func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint3 } return owner.UID, owner.GID, nil } + +// EnsureContainerPathAs creates the specified directory owned by USER +// with the file mode set to MODE. +func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + mountPoint, err := b.Mount(b.MountLabel) + if err != nil { + return err + } + defer func() { + if err2 := b.Unmount(); err2 != nil { + logrus.Errorf("error unmounting container: %v", err2) + } + }() + + uid, gid := uint32(0), uint32(0) + if user != "" { + if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil { + uid = uidForCopy + gid = gidForCopy + } + } + + destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap) + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} + opts := copier.MkdirOptions{ + ChmodNew: mode, + ChownNew: idPair, + UIDMap: destUIDMap, + GIDMap: destGIDMap, + } + return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts) + +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 789233405e2..0e45d12c2c8 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -270,7 +270,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { } return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint) } - if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { // nolint:unconvert (required for some OS/arch combinations) + if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations) logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint) continue } diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index f760d252767..8a850e90866 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -13,6 +13,7 @@ import ( "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage" @@ -154,6 +155,10 @@ type Builder 
struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use when running processes in the container with non-host user namespaces. IDMappingOptions define.IDMappingOptions // Capabilities is a list of capabilities to use when running commands in the container. @@ -257,6 +262,8 @@ type BuilderOptions struct { // or "scratch" to indicate that the container should not be based on // an image. FromImage string + // ContainerSuffix is the suffix to add for generated container names + ContainerSuffix string // Container is a desired name for the build container. Container string // PullPolicy decides whether or not we should pull the image that @@ -271,6 +278,8 @@ type BuilderOptions struct { // to store copies of layer blobs that we pull down, if any. It should // already exist. BlobDirectory string + // Logger is the logrus logger to write log messages with + Logger *logrus.Logger `json:"-"` // Mount signals to NewBuilder() that the container should be mounted // immediately. Mount bool @@ -307,6 +316,10 @@ type BuilderOptions struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use if we're setting up our own user namespace. IDMappingOptions *define.IDMappingOptions // Capabilities is a list of capabilities to use when @@ -327,6 +340,10 @@ type BuilderOptions struct { // OciDecryptConfig contains the config that can be used to decrypt an image if it is // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig + // ProcessLabel is the SELinux process label associated with the container + ProcessLabel string + // MountLabel is the SELinux mount label associated with the container + MountLabel string } // ImportOptions are used to initialize a Builder from an existing container @@ -396,6 +413,12 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) { if b.Type != containerType { return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type) } + + netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath) + if err != nil { + return nil, err + } + b.NetworkInterface = netInt b.store = store b.fixupConfig(nil) b.setupLogger() diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 8926b2e6f37..16528b87e55 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,6 +1,241 @@ -- Changelog for v1.23.1 (2021-09-27) - * Vendor containers/common v0.44.2 - * post-1.23 branch fixups +- Changelog for v1.25.1 (2022-03-30) + * buildah: create WORKDIR with USER permissions + * vendor: update github.com/openshift/imagebuilder + * copier: attempt to open the dir before adding it + * Updated dependabot to get updates for GitHub actions. 
+ * Switch most calls to filepath.Walk to filepath.WalkDir + * build: allow --no-cache and --layers so build cache can be overrided + * build(deps): bump github.com/onsi/gomega from 1.18.1 to 1.19.0 + * Bump to v1.26.0-dev + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + +- Changelog for v1.25.0 (2022-03-25) + * install: drop RHEL/CentOS 7 doc + * build(deps): bump github.com/containers/common from 0.47.4 to 0.47.5 + * Bump c/storage to v1.39.0 in main + * Add a test for CVE-2022-27651 + * build(deps): bump github.com/docker/docker + * Bump github.com/prometheus/client_golang to v1.11.1 + * [CI:DOCS] man pages: sort flags, and keep them that way + * build(deps): bump github.com/containerd/containerd from 1.6.1 to 1.6.2 + * Don't pollute + * network setup: increase timeout to 4 minutes + * do not set the inheritable capabilities + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * build(deps): bump github.com/containers/ocicrypt from 1.1.2 to 1.1.3 + * parse: convert exposed GetVolumes to internal only + * buildkit: mount=type=cache support locking external cache store + * .in support: improve error message when cpp is not installed + * buildah image: install cpp + * build(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 + * build(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 + * build(deps): bump github.com/docker/docker + * Add --no-hosts flag to eliminate use of /etc/hosts within containers + * test: remove skips for rootless users + * test: unshare mount/umount if test is_rootless + * tests/copy: read correct containers.conf + * build(deps): bump github.com/docker/distribution + * cirrus: add seperate task and matrix for rootless + * tests: skip tests for rootless which need unshare + * buildah: test rootless integration + * vendor: bump c/storage to main/93ce26691863 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.9 to 1.7.10 + * tests/copy: initialize the network, too + * [CI:DOCS] remove references to Kubic for CentOS and Ubuntu + * build(deps): bump github.com/containerd/containerd from 1.6.0 to 1.6.1 + * use c/image/pkg/blobcache + * vendor c/image/v5@v5.20.0 + * add: ensure the context directory is an absolute path + * executor: docker builds must inherit healthconfig from base if any + * docs: Remove Containerfile and containeringore + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.8 to 1.7.9 + * helpers.bash: Use correct syntax + * speed up combination-namespaces test + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Bump back to 1.25.0-dev + * build(deps): bump github.com/containerd/containerd from 1.5.9 to 1.6.0 + +- Changelog for v1.24.2 (2022-02-16) + * Increase subuid/subgid to 65535 + * history: only add proxy vars to history if specified + * run_linux: use --systemd-cgroup + * buildah: new global option --cgroup-manager + * Makefile: build with systemd when available + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.7 to 1.7.8 + * Bump c/common to v0.47.4 + * Cirrus: Use updated VM images + * conformance: add a few "replace-directory-with-symlink" tests + * Bump back to v1.25.0-dev + +- Changelog for v1.24.1 (2022-02-03) + * executor: Add support for inline --platform within Dockerfile + * caps: fix buildah run --cap-add=all + * Update vendor of openshift/imagebuilder + * Bump version of containers/image and containers/common + * Update vendor of containers/common + * System tests: fix accidental vandalism of source dir + * 
build(deps): bump github.com/containers/storage from 1.38.1 to 1.38.2 + * imagebuildah.BuildDockerfiles(): create the jobs semaphore + * build(deps): bump github.com/onsi/gomega from 1.18.0 to 1.18.1 + * overlay: always honor mountProgram + * overlay: move mount program invocation to separate function + * overlay: move mount program lookup to separate function + * Bump to v1.25.0-dev [NO TESTS NEEDED] + +- Changelog for v1.24.0 (2022-01-26) + * Update vendor of containers/common + * build(deps): bump github.com/golangci/golangci-lint in /tests/tools + * Github-workflow: Report both failures and errors. + * build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0 + * Update docs/buildah-build.1.md + * [CI:DOCS] Fix typos and improve language + * buildah bud --network add support for custom networks + * Make pull commands be consistent + * docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing + * build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0 + * Vendor in latest containers/image + * Run codespell on code + * .github/dependabot.yml: add tests/tools go.mod + * CI: rm git-validation, add GHA job to validate PRs + * tests/tools: bump go-md2man to v2.0.1 + * tests/tools/Makefile: simplify + * tests/tools: bump onsi/ginkgo to v1.16.5 + * vendor: bump c/common and others + * mount: add support for custom upper and workdir with overlay mounts + * linux: fix lookup for runtime + * overlay: add MountWithOptions to API which extends support for advanced overlay + * Allow processing of SystemContext from FlagSet + * .golangci.yml: enable unparam linter + * util/resolveName: rm bool return + * tests/tools: bump golangci-lint + * .gitignore: fixups + * all: fix capabilities.NewPid deprecation warnings + * bind/mount.go: fix linter comment + * all: fix gosimple warning S1039 + * tests/e2e/buildah_suite_test.go: fix gosimple warnings + * imagebuildah/executor.go: fix gosimple warning + * util.go: fix gosimple warning + * build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0 + * Enable git-daemon tests + * Allow processing of id options from FlagSet + * Cirrus: Re-order tasks for more parallelism + * Cirrus: Freshen VM images + * Fix platform handling for empty os/arch values + * Allow processing of network options from FlagSet + * Fix permissions on secrets directory + * Update containers/image and containers/common + * bud.bats: use a local git daemon for the git protocol test + * Allow processing of common options from FlagSet + * Cirrus: Run int. 
tests in parallel with unit + * vendor c/common + * Fix default CNI paths + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7 + * multi-stage: enable mounting stages across each other with selinux enabled + * executor: Share selinux label of first stage with other stages in a build + * buildkit: add from field to bind and cache mounts so images can be used as source + * Use config.ProxyEnv from containers/common + * use libnetwork from c/common for networking + * setup the netns in the buildah parent process + * build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9 + * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6 + * build: fix libsubid test + * Allow callers to replace the ContainerSuffix + * parse: allow parsing anomaly non-human value for memory control group + * .cirrus: remove static_build from ci + * stage_executor: re-use all possible layers from cache for squashed builds + * build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 + * Allow rootless buildah to set resource limits on cgroup V2 + * build(deps): bump github.com/docker/docker + * tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification + * build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3 + * Wire logger through to config + * copier.Put: check for is-not-a-directory using lstat, not stat + * Turn on rootless cgroupv2 tests + * Grab all of the containers.conf settings for namespaces. + * image: set MediaType in OCI manifests + * copier: RemoveAll possibly-directories + * Simple README fix + * images: accept multiple filter with logical AND + * build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1 + * UPdate vendor of container/storage + * build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0 + * build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0 + * Make LocalIP public function so Podman can use it + * Fix UnsetEnv for buildah bud + * Tests should rely only on static/unchanging images + * run: ensure that stdio pipes are labeled correctly + * build(deps): bump github.com/docker/docker + * Cirrus: Bump up to Fedora 35 & Ubuntu 21.10 + * chroot: don't use the generate default seccomp filter for unit tests + * build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8 + * ssh-agent: Increase timeout before we explicitly close connection + * docs/tutorials: update + * Clarify that manifest defaults to localhost as the registry name + * "config": remove a stray bit of debug output + * "commit": fix a flag typo + * Fix an error message: unlocking vs locking + * Expand the godoc for CommonBuildOptions.Secrets + * chroot: accept an "rw" option + * Add --unsetenv option to buildah commit and build + * define.TempDirForURL(): show CombinedOutput when a command fails + * config: support the variant field + * rootless: do not bind mount /sys if not needed + * Fix tutorial to specify command on buildah run line + * build: history should not contain ARG values + * docs: Use guaranteed path for go-md2man + * run: honor --network=none from builder if nothing specified + * networkpolicy: Should be enabled instead of default when explictly set + * Add support for env var secret sources + * build(deps): bump github.com/docker/docker + * fix: another non-portable shebang + * Rootless containers users should use additional groups + * Support overlayfs path contains colon + * Report ignorefile location when no content added + * Add support for 
host.containers.internal in the /etc/hosts + * build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5 + * imagebuildah: fix nil deref + * buildkit: add support for mount=type=cache + * Default secret mode to 400 + * [CI:DOCS] Include manifest example usage + * docs: update buildah-from, buildah-pull 'platform' option compatibility notes + * docs: update buildah-build 'platform' option compatibility notes + * De-dockerize the man page as much as possible + * [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st + * docs: Fix and Update Containerfile man page with supported mount types + * mount: add tmpcopyup to tmpfs mount option + * buildkit: Add support for --mount=type=tmpfs + * build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1 + * Fix command doc links in README.md + * build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1 + * build: Add support for buildkit like --mount=type=bind + * Bump containerd to v1.5.7 + * build(deps): bump github.com/docker/docker + * tests: stop pulling php, composer + * Fix .containerignore link file + * Cirrus: Fix defunct package metadata breaking cache + * build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0 + * buildah build: add --all-platforms + * Add man page for Containerfile and .containerignore + * Plumb the remote logger throughut Buildah + * Replace fmt.Sprintf("%d", x) with strconv.Itoa(x) + * Run: Cleanup run directory after every RUN step + * build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0 + * Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation + * Makefile: check for `-race` using `-mod=vendor` + * imagebuildah: fix an attempt to write to a nil map + * push: support to specify the compression format + * conformance: allow test cases to specify dockerUseBuildKit + * build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0 + * build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1 + * unmarshalConvertedConfig(): handle zstd compression + * tests/copy/copy: wire up compression options + * Update to github.com/vbauerster/mpb v7.1.5 + * Add flouthoc to OWNERS + * build: Add additional step nodes when labels are modified + * Makefile: turn on race detection whenever it's available + * conformance: add more tests for exclusion short-circuiting + * Update VM Images + Drop prior-ubuntu testing + * Bump to v1.24.0-dev - Changelog for v1.23.0 (2021-09-13) * Vendor in containers/common v0.44.0 diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go index e6f28e81a1c..dcfbd0f2474 100644 --- a/vendor/github.com/containers/buildah/chroot/run.go +++ b/vendor/github.com/containers/buildah/chroot/run.go @@ -238,7 +238,7 @@ func runUsingChrootMain() { // Set the kernel's lock to "unlocked". locked := 0 if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 { - logrus.Errorf("error locking PTY descriptor: %v", err) + logrus.Errorf("error unlocking PTY descriptor: %v", err) os.Exit(1) } // Get a handle for the other end. @@ -883,46 +883,49 @@ func setApparmorProfile(spec *specs.Spec) error { // setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start. 
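The setCapabilities function below switches from the deprecated capability.NewPid to capability.NewPid2 and stops copying the spec's inheritable set (see the "do not set the inheritable capabilities" and "fix capabilities.NewPid deprecation warnings" changelog entries above). A minimal sketch of the NewPid2 pattern, assuming the github.com/syndtr/gocapability package that buildah vendors: unlike NewPid, NewPid2 does not snapshot process state at construction time, so an explicit Load() is required before reading any bits.

```go
package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// NewPid2 (unlike the deprecated NewPid) does not read the process's
	// capability state when the handle is created...
	caps, err := capability.NewPid2(0) // 0 means "the current process"
	if err != nil {
		panic(err)
	}
	// ...so Load() must be called before Get() or any copying of bits.
	if err := caps.Load(); err != nil {
		panic(err)
	}
	fmt.Println("CAP_SETUID effective:", caps.Get(capability.EFFECTIVE, capability.CAP_SETUID))
}
```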
func setCapabilities(spec *specs.Spec, keepCaps ...string) error { - currentCaps, err := capability.NewPid(0) + currentCaps, err := capability.NewPid2(0) if err != nil { return errors.Wrapf(err, "error reading capabilities of current process") } - caps, err := capability.NewPid(0) + if err := currentCaps.Load(); err != nil { + return errors.Wrapf(err, "error loading capabilities") + } + caps, err := capability.NewPid2(0) if err != nil { return errors.Wrapf(err, "error reading capabilities of current process") } capMap := map[capability.CapType][]string{ capability.BOUNDING: spec.Process.Capabilities.Bounding, capability.EFFECTIVE: spec.Process.Capabilities.Effective, - capability.INHERITABLE: spec.Process.Capabilities.Inheritable, + capability.INHERITABLE: []string{}, capability.PERMITTED: spec.Process.Capabilities.Permitted, capability.AMBIENT: spec.Process.Capabilities.Ambient, } knownCaps := capability.List() - caps.Clear(capability.CAPS | capability.BOUNDS | capability.AMBS) + noCap := capability.Cap(-1) for capType, capList := range capMap { for _, capToSet := range capList { - cap := capability.CAP_LAST_CAP + cap := noCap for _, c := range knownCaps { if strings.EqualFold("CAP_"+c.String(), capToSet) { cap = c break } } - if cap == capability.CAP_LAST_CAP { + if cap == noCap { return errors.Errorf("error mapping capability %q to a number", capToSet) } caps.Set(capType, cap) } for _, capToSet := range keepCaps { - cap := capability.CAP_LAST_CAP + cap := noCap for _, c := range knownCaps { if strings.EqualFold("CAP_"+c.String(), capToSet) { cap = c break } } - if cap == capability.CAP_LAST_CAP { + if cap == noCap { return errors.Errorf("error mapping capability %q to a number", capToSet) } if currentCaps.Get(capType, cap) { @@ -1191,21 +1194,33 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } requestFlags := bindFlags expectedFlags := uintptr(0) - if util.StringInSlice("nodev", m.Options) { - requestFlags |= unix.MS_NODEV - expectedFlags |= unix.ST_NODEV - } - if util.StringInSlice("noexec", m.Options) { - requestFlags |= unix.MS_NOEXEC - expectedFlags |= unix.ST_NOEXEC - } - if util.StringInSlice("nosuid", m.Options) { - requestFlags |= unix.MS_NOSUID - expectedFlags |= unix.ST_NOSUID - } - if util.StringInSlice("ro", m.Options) { - requestFlags |= unix.MS_RDONLY - expectedFlags |= unix.ST_RDONLY + for _, option := range m.Options { + switch option { + case "nodev": + requestFlags |= unix.MS_NODEV + expectedFlags |= unix.ST_NODEV + case "dev": + requestFlags &= ^uintptr(unix.MS_NODEV) + expectedFlags &= ^uintptr(unix.ST_NODEV) + case "noexec": + requestFlags |= unix.MS_NOEXEC + expectedFlags |= unix.ST_NOEXEC + case "exec": + requestFlags &= ^uintptr(unix.MS_NOEXEC) + expectedFlags &= ^uintptr(unix.ST_NOEXEC) + case "nosuid": + requestFlags |= unix.MS_NOSUID + expectedFlags |= unix.ST_NOSUID + case "suid": + requestFlags &= ^uintptr(unix.MS_NOSUID) + expectedFlags &= ^uintptr(unix.ST_NOSUID) + case "ro": + requestFlags |= unix.MS_RDONLY + expectedFlags |= unix.ST_RDONLY + case "rw": + requestFlags &= ^uintptr(unix.MS_RDONLY) + expectedFlags &= ^uintptr(unix.ST_RDONLY) + } } switch m.Type { case "bind": diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go index 1ca0a159ebc..f130f7a22fd 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -3,6 +3,9 @@ package chroot import ( + "io/ioutil" + + 
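The bind-mount hunk above replaces one-way `if` checks with a switch over the option list, so options can both set and clear flags and a later "rw" or "dev" overrides an earlier "ro" or "nodev". A standalone sketch of that toggle pattern; the helper name is mine, not buildah's, and only two option pairs are shown:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// mountFlagsFor is a hypothetical helper (not the vendored function) showing
// the toggle pattern: each option either sets or clears its bit, so later
// options override earlier ones instead of flags only ever accumulating.
func mountFlagsFor(options []string) (request, expected uintptr) {
	for _, opt := range options {
		switch opt {
		case "nodev":
			request |= unix.MS_NODEV
			expected |= unix.ST_NODEV
		case "dev":
			request &^= unix.MS_NODEV
			expected &^= unix.ST_NODEV
		case "ro":
			request |= unix.MS_RDONLY
			expected |= unix.ST_RDONLY
		case "rw":
			request &^= unix.MS_RDONLY
			expected &^= unix.ST_RDONLY
		}
	}
	return request, expected
}

func main() {
	req, _ := mountFlagsFor([]string{"ro", "nodev", "rw"}) // "rw" clears the earlier "ro"
	fmt.Printf("request flags: %#x\n", req)
}
```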
"github.com/containers/common/pkg/seccomp" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" libseccomp "github.com/seccomp/libseccomp-golang" @@ -171,3 +174,27 @@ func setSeccomp(spec *specs.Spec) error { } return nil } + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + switch seccompProfilePath { + case "unconfined": + spec.Linux.Seccomp = nil + case "": + seccompConfig, err := seccomp.GetDefaultProfile(spec) + if err != nil { + return errors.Wrapf(err, "loading default seccomp profile failed") + } + spec.Linux.Seccomp = seccompConfig + default: + seccompProfile, err := ioutil.ReadFile(seccompProfilePath) + if err != nil { + return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath) + } + seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec) + if err != nil { + return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath) + } + spec.Linux.Seccomp = seccompConfig + } + return nil +} diff --git a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go index a5b74bf092c..f33dd254a35 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp_unsupported.go @@ -13,3 +13,11 @@ func setSeccomp(spec *specs.Spec) error { } return nil } + +func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error { + if spec.Linux != nil { + // runtime-tools may have supplied us with a default filter + spec.Linux.Seccomp = nil + } + return nil +} diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index bbf1727fbd5..25c30071611 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -101,6 +101,8 @@ type CommitOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + // UnsetEnvs is a list of environments to not add to final image. 
+ UnsetEnvs []string } var ( diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index a3da64e6bed..effaa81e4d0 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -8,9 +8,11 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/stringid" @@ -28,18 +30,24 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference())) } if wantedManifestMIMEType != actualManifestMIMEType { + layerInfos := img.LayerInfos() + for i := range layerInfos { // force the "compression" to gzip, which is supported by all of the formats we care about + layerInfos[i].CompressionOperation = types.Compress + layerInfos[i].CompressionAlgorithm = &compression.Gzip + } updatedImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{ + LayerInfos: layerInfos, + }) + if err != nil { + return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference())) + } + secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{ ManifestMIMEType: wantedManifestMIMEType, - InformationOnly: types.ManifestUpdateInformation{ // Strictly speaking, every value in here is invalid. But… - Destination: nil, // Destination is technically required, but actually necessary only for conversion _to_ v2s1. Leave it nil, we will crash if that ever changes. - LayerInfos: nil, // LayerInfos is necessary for size information in v2s2/OCI manifests, but the code can work with nil, and we are not reading the converted manifest at all. - LayerDiffIDs: nil, // LayerDiffIDs are actually embedded in the converted manifest, but the code can work with nil, and the values are not needed until pushing the finished image, at which time containerImageRef.NewImageSource builds the values from scratch. - }, }) if err != nil { return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType) } - img = updatedImg + img = secondUpdatedImg } config, err := img.ConfigBlob(ctx) if err != nil { @@ -126,6 +134,10 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) { } else { b.SetArchitecture(runtime.GOARCH) } + // in case the arch string we started with was shorthand for a known arch+variant pair, normalize it + ps := platforms.Normalize(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()}) + b.SetArchitecture(ps.Architecture) + b.SetVariant(ps.Variant) } if b.Format == define.Dockerv2ImageManifest && b.Hostname() == "" { b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID())) @@ -205,6 +217,21 @@ func (b *Builder) SetArchitecture(arch string) { b.Docker.Architecture = arch } +// Variant returns a name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. 
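The fixupConfig hunk above now runs the recorded OS/architecture/variant through containerd's platforms.Normalize so that shorthand architecture names are stored as canonical arch+variant pairs. A small sketch of what that call does; the exact mappings depend on the vendored containerd version:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Shorthand names are expanded to their canonical arch+variant form;
	// for example "armhf" is typically normalized to linux/arm/v7.
	p := platforms.Normalize(ocispec.Platform{OS: "linux", Architecture: "armhf"})
	fmt.Println(platforms.Format(p)) // e.g. "linux/arm/v7"
}
```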
+func (b *Builder) Variant() string { + return b.OCIv1.Variant +} + +// SetVariant sets the name of the architecture variant on which the container, +// or a container built using an image built from this container, is intended +// to be run. +func (b *Builder) SetVariant(variant string) { + b.Docker.Variant = variant + b.OCIv1.Variant = variant +} + // Maintainer returns contact information for the person who built the image. func (b *Builder) Maintainer() string { return b.OCIv1.Author @@ -247,7 +274,7 @@ func (b *Builder) ClearOnBuild() { // discarded when writing images using OCIv1 formats. func (b *Builder) SetOnBuild(onBuild string) { if onBuild != "" && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) + b.Logger.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild) } b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild) } @@ -279,7 +306,7 @@ func (b *Builder) Shell() []string { // discarded when writing images using OCIv1 formats. func (b *Builder) SetShell(shell []string) { if len(shell) > 0 && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell) + b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell) } b.Docker.Config.Shell = copyStringSlice(shell) @@ -516,7 +543,7 @@ func (b *Builder) Domainname() string { // discarded when writing images using OCIv1 formats. func (b *Builder) SetDomainname(name string) { if name != "" && b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) + b.Logger.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name) } b.Docker.Config.Domainname = name } @@ -593,7 +620,7 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) { b.Docker.Config.Healthcheck = nil if config != nil { if b.Format != define.Dockerv2ImageManifest { - logrus.Warnf("Healthcheck is not supported for OCI image format and will be ignored. Must use `docker` format") + b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. 
Must use `docker` format") } b.Docker.Config.Healthcheck = &docker.HealthConfig{ Test: copyStringSlice(config.Test), diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 1823e523840..d9f531acc2e 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "io/fs" "io/ioutil" "net" "os" @@ -1179,10 +1180,10 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa // we don't expand any of the contents that are archives options := req.GetOptions options.ExpandArchives = false - walkfn := func(path string, info os.FileInfo, err error) error { + walkfn := func(path string, d fs.DirEntry, err error) error { if err != nil { if options.IgnoreUnreadable && errorIsPermission(err) { - if info != nil && info.IsDir() { + if info != nil && d.IsDir() { return filepath.SkipDir } return nil @@ -1192,8 +1193,8 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa } return errors.Wrapf(err, "copier: get: error reading %q", path) } - if info.Mode()&os.ModeType == os.ModeSocket { - logrus.Warningf("copier: skipping socket %q", info.Name()) + if d.Type() == os.ModeSocket { + logrus.Warningf("copier: skipping socket %q", d.Name()) return nil } // compute the path of this item @@ -1216,7 +1217,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa return err } if skip { - if info.IsDir() { + if d.IsDir() { // if there are no "include // this anyway" patterns at // all, we don't need to @@ -1254,17 +1255,21 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa } // if it's a symlink, read its target symlinkTarget := "" - if info.Mode()&os.ModeType == os.ModeSymlink { + if d.Type() == os.ModeSymlink { target, err := os.Readlink(path) if err != nil { return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path) } symlinkTarget = target } + info, err := d.Info() + if err != nil { + return err + } // if it's a directory and we're staying on one device, and it's on a // different device than the one we started from, skip its contents var ok error - if info.Mode().IsDir() && req.GetOptions.NoCrossDevice { + if d.IsDir() && req.GetOptions.NoCrossDevice { if !sameDevice(topInfo, info) { ok = filepath.SkipDir } @@ -1282,7 +1287,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa return ok } // walk the directory tree, checking/adding items individually - if err := filepath.Walk(item, walkfn); err != nil { + if err := filepath.WalkDir(item, walkfn); err != nil { return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item) } itemsCopied++ @@ -1461,6 +1466,13 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str return errors.Wrapf(err, "error opening file for adding its contents to archive") } defer f.Close() + } else if hdr.Typeflag == tar.TypeDir { + // open the directory file first to make sure we can access it. 
+ f, err = os.Open(contentPath) + if err != nil { + return errors.Wrapf(err, "error opening directory for adding its contents to archive") + } + defer f.Close() } // output the header if err = tw.WriteHeader(hdr); err != nil { @@ -1660,7 +1672,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM // only check the length if there wasn't an error, which we'll // check along with errors for other types of entries if err == nil && written != hdr.Size { - return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size) + return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size) } case tar.TypeLink: var linkTarget string @@ -1681,7 +1693,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = os.Link(linkTarget, path) } } @@ -1696,7 +1708,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)) } } @@ -1711,7 +1723,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))) } } @@ -1726,14 +1738,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))) } } case tar.TypeDir: if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) { var st os.FileInfo - if st, err = os.Stat(path); err == nil && !st.IsDir() { + if st, err = os.Lstat(path); err == nil && !st.IsDir() { // it's not a directory, so remove it and mkdir if err = os.Remove(path); err == nil { err = os.Mkdir(path, 0700) @@ -1758,7 +1770,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM break } } - if err = os.Remove(path); err == nil { + if err = os.RemoveAll(path); err == nil { err = mkfifo(path, 0600) } } diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index cff9a3d830b..64849153174 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -4,6 +4,7 @@ import ( "io" "time" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage/pkg/archive" @@ -36,11 +37,14 @@ type CommonBuildOptions struct { DNSServers []string // DNSOptions is the list of DNS DNSOptions []string - // MemorySwap limits the amount of memory and swap together. - MemorySwap int64 // LabelOpts is the a slice of fields of an SELinux context, given in "field:pair" format, or "disable". // Recognized field names are "role", "type", and "level". LabelOpts []string + // MemorySwap limits the amount of memory and swap together. + MemorySwap int64 + // NoHosts tells the builder not to create /etc/hosts content when running + // containers. 
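The copier changes above move from filepath.Walk to filepath.WalkDir (Go 1.16+): the callback now receives a lightweight fs.DirEntry, and a full os.FileInfo is only fetched via d.Info() for entries that actually need it. The related switch from os.Remove to os.RemoveAll makes replacing an existing path work even when that path is a non-empty directory. A minimal standalone WalkDir sketch:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	// d is a cheap fs.DirEntry; stat-style metadata is fetched lazily via
	// d.Info() only when it is required.
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil // keep descending; nothing else to do for directories here
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		fmt.Printf("%s\t%d bytes\n", path, info.Size())
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
	}
}
```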
+ NoHosts bool // OmitTimestamp forces epoch 0 as created timestamp to allow for // deterministic, content-addressable builds. OmitTimestamp bool @@ -70,7 +74,9 @@ type CommonBuildOptions struct { Ulimit []string // Volumes to bind mount into the container Volumes []string - // Secrets are the available secrets to use in a build + // Secrets are the available secrets to use in a build. Each item in the + // slice takes the form "id=foo,src=bar", where both "id" and "src" are + // required, in that order, and "bar" is the name of a file. Secrets []string // SSHSources is the available ssh agent connections to forward in the build SSHSources []string @@ -78,6 +84,8 @@ type CommonBuildOptions struct { // BuildOptions can be used to alter how an image is built. type BuildOptions struct { + // ContainerSuffix it the name to suffix containers with + ContainerSuffix string // ContextDirectory is the default source location for COPY and ADD // commands. ContextDirectory string @@ -157,6 +165,10 @@ type BuildOptions struct { // CNIConfigDir is the location of CNI configuration files, if the files in // the default configuration directory shouldn't be used. CNIConfigDir string + + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + NetworkInterface nettypes.ContainerNetwork `json:"-"` + // ID mapping options to use if we're setting up our own user namespace // when handling RUN instructions. IDMappingOptions *IDMappingOptions @@ -227,6 +239,8 @@ type BuildOptions struct { RusageLogFile string // Excludes is a list of excludes to be used instead of the .dockerignore file. Excludes []string + // IgnoreFile is a name of the .containerignore file + IgnoreFile string // From is the image name to use to replace the value specified in the first // FROM instruction in the Containerfile From string @@ -234,4 +248,10 @@ type BuildOptions struct { // to build the image for. If this slice has items in it, the OS and // Architecture fields above are ignored. Platforms []struct{ OS, Arch, Variant string } + // AllPlatforms tells the builder to set the list of target platforms + // to match the set of platforms for which all of the build's base + // images are available. If this field is set, Platforms is ignored. + AllPlatforms bool + // UnsetEnvs is a list of environments to not add to final image. + UnsetEnvs []string } diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 4f3ebf01a49..46db9a609fc 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,15 +29,11 @@ const ( Package = "buildah" // Version for the Package. Bump version in contrib/rpm/buildah.spec // too. - Version = "1.23.1" + Version = "1.25.1" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" - DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin" - // DefaultCNIConfigDir is the default location of CNI configuration files. - DefaultCNIConfigDir = "/etc/cni/net.d" - // OCIv1ImageManifest is the MIME type of an OCIv1 image manifest, // suitable for specifying as a value of the PreferredManifestType // member of a CommitOptions structure. It is also the default. 
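Several of the option fields above are new or newly documented: CommonBuildOptions.NoHosts, the "id=foo,src=bar" secret format, BuildOptions.AllPlatforms, and BuildOptions.UnsetEnvs. A hypothetical caller-side sketch showing them together; the paths, secret name, and environment variable are illustrative, and the storage.Store is assumed to be set up elsewhere:

```go
package main

import (
	"context"

	"github.com/containers/buildah/define"
	"github.com/containers/buildah/imagebuildah"
	"github.com/containers/storage"
)

// buildExample is an illustrative caller, not vendored code.
func buildExample(ctx context.Context, store storage.Store) (string, error) {
	options := define.BuildOptions{
		ContextDirectory: ".",
		CommonBuildOpts: &define.CommonBuildOptions{
			NoHosts: true,                                     // don't generate /etc/hosts for RUN containers
			Secrets: []string{"id=mysecret,src=./secret.txt"}, // "id" and "src" are both required, in that order
		},
		AllPlatforms: true,                   // derive target platforms from the base images' manifest lists
		UnsetEnvs:    []string{"HTTP_PROXY"}, // drop this variable from the committed image config
	}
	id, _, err := imagebuildah.BuildDockerfiles(ctx, store, options, "Containerfile")
	return id, err
}
```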
@@ -93,6 +89,13 @@ type IDMappingOptions struct { GIDMap []specs.LinuxIDMapping } +// Secret is a secret source that can be used in a RUN +type Secret struct { + ID string + Source string + SourceType string +} + // TempDirForURL checks if the passed-in string looks like a URL or -. If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along @@ -117,12 +120,12 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.Wrapf(err, "error parsing url %q", url) } if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") { - err = cloneToDirectory(url, name) + combinedOutput, err := cloneToDirectory(url, name) if err != nil { if err2 := os.RemoveAll(name); err2 != nil { logrus.Debugf("error removing temporary directory %q: %v", name, err2) } - return "", "", err + return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput)) } return name, "", nil } @@ -160,7 +163,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err return "", "", errors.Errorf("unreachable code reached") } -func cloneToDirectory(url, dir string) error { +func cloneToDirectory(url, dir string) ([]byte, error) { gitBranch := strings.Split(url, "#") var cmd *exec.Cmd if len(gitBranch) < 2 { @@ -170,7 +173,7 @@ func cloneToDirectory(url, dir string) error { logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir) cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir) } - return cmd.Run() + return cmd.CombinedOutput() } func downloadToDirectory(url, dir string) error { diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 561287ac276..b0ed2e4c021 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -151,6 +151,8 @@ type V1Image struct { Config *Config `json:"config,omitempty"` // Architecture is the hardware that the image is build and runs on Architecture string `json:"architecture,omitempty"` + // Variant is a variant of the CPU that the image is built and runs on + Variant string `json:"variant,omitempty"` // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index ef05f37d26e..e859de183db 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -239,6 +239,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, Versioned: specs.Versioned{ SchemaVersion: 2, }, + MediaType: v1.MediaTypeImageManifest, Config: v1.Descriptor{ MediaType: v1.MediaTypeImageConfig, }, @@ -752,6 +753,10 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err if manifestType == "" { manifestType = define.OCIv1ImageManifest } + + for _, u := range options.UnsetEnvs { + b.UnsetEnv(u) + } oconfig, err := json.Marshal(&b.OCIv1) if err != nil { return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1) diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 
bdb407885b2..2384306dbd7 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -10,15 +10,19 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "sync" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" "github.com/containers/buildah/util" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" + "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/shortnames" istorage "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" "github.com/containers/storage" @@ -58,6 +62,10 @@ type BuildOptions = define.BuildOptions // returns the ID of the built image, and if a name was assigned to it, a // canonical reference for that image. func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (id string, ref reference.Canonical, err error) { + if options.CommonBuildOpts == nil { + options.CommonBuildOpts = &define.CommonBuildOptions{} + } + if len(paths) == 0 { return "", nil, errors.Errorf("error building: no dockerfiles specified") } @@ -168,8 +176,17 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B files = append(files, b.Bytes()) } - if options.Jobs != nil && *options.Jobs != 0 { - options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs)) + if options.JobSemaphore == nil { + if options.Jobs != nil { + if *options.Jobs < 0 { + return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") + } + if *options.Jobs > 0 { + options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs)) + } + } else { + options.JobSemaphore = semaphore.NewWeighted(1) + } } manifestList := options.Manifest @@ -193,21 +210,37 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B }) } + if options.AllPlatforms { + options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext) + if err != nil { + return "", nil, err + } + } + systemContext := options.SystemContext for _, platform := range options.Platforms { platformContext := *systemContext - platformContext.OSChoice = platform.OS - platformContext.ArchitectureChoice = platform.Arch - platformContext.VariantChoice = platform.Variant + platformSpec := platforms.Normalize(v1.Platform{ + OS: platform.OS, + Architecture: platform.Arch, + Variant: platform.Variant, + }) + // platforms.Normalize converts an empty os value to GOOS + // so we have to check the original value here to not overwrite the default for no reason + if platform.OS != "" { + platformContext.OSChoice = platformSpec.OS + } + if platform.Arch != "" { + platformContext.ArchitectureChoice = platformSpec.Architecture + platformContext.VariantChoice = platformSpec.Variant + } platformOptions := options platformOptions.SystemContext = &platformContext + platformOptions.OS = platformContext.OSChoice + platformOptions.Architecture = platformContext.ArchitectureChoice logPrefix := "" if len(options.Platforms) > 1 { - logPrefix = "[" + platform.OS + "/" + platform.Arch - if platform.Variant != "" { - logPrefix += "/" + platform.Variant - } - logPrefix += "] " + logPrefix = "[" + platforms.Format(platformSpec) + "] " } builds.Go(func() error { thisID, thisRef, err := buildDockerfilesOnce(ctx, store, 
logger, logPrefix, platformOptions, paths, files) @@ -217,12 +250,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B id, ref = thisID, thisRef instancesLock.Lock() instances = append(instances, instance{ - ID: thisID, - Platform: v1.Platform{ - OS: platformContext.OSChoice, - Architecture: platformContext.ArchitectureChoice, - Variant: platformContext.VariantChoice, - }, + ID: thisID, + Platform: platformSpec, }) instancesLock.Unlock() return nil @@ -318,6 +347,35 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr } mainNode.Children = append(mainNode.Children, additionalNode.Children...) } + + // Check if any modifications done to labels + // add them to node-layer so it becomes regular + // layer. + // Reason: Docker adds label modification as + // last step which can be processed as regular + // steps and if no modification is done to layers + // its easier to re-use cached layers. + if len(options.Labels) > 0 { + for _, labelSpec := range options.Labels { + label := strings.SplitN(labelSpec, "=", 2) + labelLine := "" + key := label[0] + value := "" + if len(label) > 1 { + value = label[1] + } + // check from only empty key since docker supports empty value + if key != "" { + labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value) + additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine)) + if err != nil { + return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps") + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + } + } + exec, err := newExecutor(logger, logPrefix, store, options, mainNode) if err != nil { return "", nil, errors.Wrapf(err, "error creating build executor") @@ -373,8 +431,8 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string cppCommand := "cpp" cppPath, err := exec.LookPath(cppCommand) if err != nil { - if os.IsNotExist(err) { - err = errors.Errorf("error: %s support requires %s to be installed", containerfile, cppPath) + if errors.Is(err, exec.ErrNotFound) { + err = fmt.Errorf("error: %v: .in support requires %s to be installed", err, cppCommand) } return nil, err } @@ -400,3 +458,194 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string } return &stdoutBuffer, nil } + +// platformsForBaseImages resolves the names of base images from the +// dockerfiles, and if they are all valid references to manifest lists, returns +// the list of platforms that are supported by all of the base images. 
+func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) { + baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args) + if err != nil { + return nil, errors.Wrapf(err, "determining list of base images") + } + logrus.Debugf("unresolved base images: %v", baseImages) + if len(baseImages) == 0 { + return nil, errors.Wrapf(err, "build uses no non-scratch base images") + } + targetPlatforms := make(map[string]struct{}) + var platformList []struct{ OS, Arch, Variant string } + for baseImageIndex, baseImage := range baseImages { + resolved, err := shortnames.Resolve(systemContext, baseImage) + if err != nil { + return nil, errors.Wrapf(err, "resolving image name %q", baseImage) + } + var manifestBytes []byte + var manifestType string + for _, candidate := range resolved.PullCandidates { + ref, err := docker.NewReference(candidate.Value) + if err != nil { + logrus.Debugf("parsing image reference %q: %v", candidate.Value.String(), err) + continue + } + src, err := ref.NewImageSource(ctx, systemContext) + if err != nil { + logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err) + continue + } + candidateBytes, candidateType, err := src.GetManifest(ctx, nil) + _ = src.Close() + if err != nil { + logrus.Debugf("reading image manifest for %q: %v", baseImage, err) + continue + } + if !manifest.MIMETypeIsMultiImage(candidateType) { + logrus.Debugf("base image %q is not a reference to a manifest list: %v", baseImage, err) + continue + } + if err := candidate.Record(); err != nil { + logrus.Debugf("error recording name %q for base image %q: %v", candidate.Value.String(), baseImage, err) + continue + } + baseImage = candidate.Value.String() + manifestBytes, manifestType = candidateBytes, candidateType + break + } + if len(manifestBytes) == 0 { + if len(resolved.PullCandidates) > 0 { + return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage) + } + return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage) + } + if manifestType != v1.MediaTypeImageIndex { + list, err := manifest.ListFromBlob(manifestBytes, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage) + } + list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex) + if err != nil { + return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage) + } + manifestBytes, err = list.Serialize() + if err != nil { + return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage) + } + } + index, err := manifest.OCI1IndexFromManifest(manifestBytes) + if err != nil { + return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage) + } + if baseImageIndex == 0 { + // populate the list with the first image's normalized platforms + for _, instance := range index.Manifests { + if instance.Platform == nil { + continue + } + platform := platforms.Normalize(*instance.Platform) + targetPlatforms[platforms.Format(platform)] = struct{}{} + logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform)) + } + } else { + // prune the list of any normalized platforms this base image doesn't support + imagePlatforms := make(map[string]struct{}) + for _, instance := range index.Manifests { + if instance.Platform == nil { + 
continue + } + platform := platforms.Normalize(*instance.Platform) + imagePlatforms[platforms.Format(platform)] = struct{}{} + logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform)) + } + var removed []string + for platform := range targetPlatforms { + if _, present := imagePlatforms[platform]; !present { + removed = append(removed, platform) + logger.Debugf("image %q does not support %q", baseImage, platform) + } + } + for _, remove := range removed { + delete(targetPlatforms, remove) + } + } + if baseImageIndex == len(baseImages)-1 && len(targetPlatforms) > 0 { + // extract the list + for platform := range targetPlatforms { + platform, err := platforms.Parse(platform) + if err != nil { + return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform) + } + platformList = append(platformList, struct{ OS, Arch, Variant string }{ + OS: platform.OS, + Arch: platform.Architecture, + Variant: platform.Variant, + }) + logger.Debugf("base images all support %q", platform) + } + } + } + if len(platformList) == 0 { + return nil, errors.New("base images have no platforms in common") + } + return platformList, nil +} + +// baseImages parses the dockerfilecontents, possibly replacing the first +// stage's base image with FROM, and returns the list of base images as +// provided. Each entry in the dockerfilenames slice corresponds to a slice in +// dockerfilecontents. +func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) { + mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0])) + if err != nil { + return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0]) + } + + for i, d := range dockerfilecontents[1:] { + additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d)) + if err != nil { + return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i]) + } + mainNode.Children = append(mainNode.Children, additionalNode.Children...) + } + + b := imagebuilder.NewBuilder(args) + defaultContainerConfig, err := config.Default() + if err != nil { + return nil, errors.Wrapf(err, "failed to get container config") + } + b.Env = defaultContainerConfig.GetDefaultEnv() + stages, err := imagebuilder.NewStages(mainNode, b) + if err != nil { + return nil, errors.Wrap(err, "error reading multiple stages") + } + var baseImages []string + nicknames := make(map[string]bool) + for stageIndex, stage := range stages { + node := stage.Node // first line + for node != nil { // each line + for _, child := range node.Children { // tokens on this line, though we only care about the first + switch strings.ToUpper(child.Value) { // first token - instruction + case "FROM": + if child.Next != nil { // second token on this line + // If we have a fromOverride, replace the value of + // image name for the first FROM in the Containerfile. + if from != "" { + child.Next.Value = from + from = "" + } + base := child.Next.Value + if base != "scratch" && !nicknames[base] { + // TODO: this didn't undergo variable and arg + // expansion, so if the AS clause in another + // FROM instruction uses argument values, + // we might not record the right value here. 
+ baseImages = append(baseImages, base) + } + } + } + } + node = node.Next // next line + } + if stage.Name != strconv.Itoa(stageIndex) { + nicknames[stage.Name] = true + } + } + return baseImages, nil +} diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 78606d2b4ec..bbc1def0c4d 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -18,6 +18,7 @@ import ( "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" "github.com/containers/common/libimage" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -57,6 +58,7 @@ var builtinAllowedBuildArgs = map[string]bool{ // interface. It coordinates the entire build by using one or more // StageExecutors to handle each stage of the build. type Executor struct { + containerSuffix string logger *logrus.Logger stages map[string]*StageExecutor store storage.Store @@ -84,47 +86,53 @@ type Executor struct { configureNetwork define.NetworkConfigurationPolicy cniPluginPath string cniConfigDir string - idmappingOptions *define.IDMappingOptions - commonBuildOptions *define.CommonBuildOptions - defaultMountsFilePath string - iidfile string - squash bool - labels []string - annotations []string - layers bool - useCache bool - removeIntermediateCtrs bool - forceRmIntermediateCtrs bool - imageMap map[string]string // Used to map images that we create to handle the AS construct. - containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. - baseMap map[string]bool // Holds the names of every base image, as given. - rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. - blobDirectory string - excludes []string - unusedArgs map[string]struct{} - capabilities []string - devices define.ContainerDevices - signBy string - architecture string - timestamp *time.Time - os string - maxPullPushRetries int - retryPullPushDelay time.Duration - ociDecryptConfig *encconfig.DecryptConfig - lastError error - terminatedStage map[string]error - stagesLock sync.Mutex - stagesSemaphore *semaphore.Weighted - jobs int - logRusage bool - rusageLogFile io.Writer - imageInfoLock sync.Mutex - imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs - fromOverride string - manifest string - secrets map[string]string - sshsources map[string]*sshagent.Source - logPrefix string + // NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks. + networkInterface nettypes.ContainerNetwork + idmappingOptions *define.IDMappingOptions + commonBuildOptions *define.CommonBuildOptions + defaultMountsFilePath string + iidfile string + squash bool + labels []string + annotations []string + layers bool + noHosts bool + useCache bool + removeIntermediateCtrs bool + forceRmIntermediateCtrs bool + imageMap map[string]string // Used to map images that we create to handle the AS construct. + containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. + baseMap map[string]bool // Holds the names of every base image, as given. + rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. 
+ blobDirectory string + excludes []string + ignoreFile string + unusedArgs map[string]struct{} + capabilities []string + devices define.ContainerDevices + signBy string + architecture string + timestamp *time.Time + os string + maxPullPushRetries int + retryPullPushDelay time.Duration + ociDecryptConfig *encconfig.DecryptConfig + lastError error + terminatedStage map[string]error + stagesLock sync.Mutex + stagesSemaphore *semaphore.Weighted + logRusage bool + rusageLogFile io.Writer + imageInfoLock sync.Mutex + imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs + fromOverride string + manifest string + secrets map[string]define.Secret + sshsources map[string]*sshagent.Source + logPrefix string + unsetEnvs []string + processLabel string // Shares processLabel of first stage container with containers of other stages in same build + mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build } type imageTypeAndHistoryAndDiffIDs struct { @@ -143,7 +151,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o excludes := options.Excludes if len(excludes) == 0 { - excludes, err = imagebuilder.ParseDockerignore(options.ContextDirectory) + excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile) if err != nil { return nil, err } @@ -179,10 +187,6 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o if err != nil { return nil, err } - jobs := 1 - if options.Jobs != nil { - jobs = *options.Jobs - } writer := options.ReportWriter if options.Quiet { @@ -203,11 +207,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o } exec := Executor{ + containerSuffix: options.ContainerSuffix, logger: logger, stages: make(map[string]*StageExecutor), store: store, contextDir: options.ContextDirectory, excludes: excludes, + ignoreFile: options.IgnoreFile, pullPolicy: options.PullPolicy, registry: options.Registry, ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions, @@ -231,6 +237,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o configureNetwork: options.ConfigureNetwork, cniPluginPath: options.CNIPluginPath, cniConfigDir: options.CNIConfigDir, + networkInterface: options.NetworkInterface, idmappingOptions: options.IDMappingOptions, commonBuildOptions: options.CommonBuildOpts, defaultMountsFilePath: options.DefaultMountsFilePath, @@ -239,6 +246,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o labels: append([]string{}, options.Labels...), annotations: append([]string{}, options.Annotations...), layers: options.Layers, + noHosts: options.CommonBuildOpts.NoHosts, useCache: !options.NoCache, removeIntermediateCtrs: options.RemoveIntermediateCtrs, forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, @@ -259,7 +267,6 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o ociDecryptConfig: options.OciDecryptConfig, terminatedStage: make(map[string]error), stagesSemaphore: options.JobSemaphore, - jobs: jobs, logRusage: options.LogRusage, rusageLogFile: rusageLogFile, imageInfoCache: make(map[string]imageTypeAndHistoryAndDiffIDs), @@ -268,6 +275,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o secrets: secrets, sshsources: sshsources, logPrefix: logPrefix, + unsetEnvs: options.UnsetEnvs, } if exec.err == nil { exec.err = os.Stderr @@ -292,9 +300,7 @@ func 
newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // and value, or just an argument, since they can be // separated by either "=" or whitespace. list := strings.SplitN(arg.Value, "=", 2) - if _, stillUnused := exec.unusedArgs[list[0]]; stillUnused { - delete(exec.unusedArgs, list[0]) - } + delete(exec.unusedArgs, list[0]) } } break @@ -353,7 +359,7 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) { found := false for _, otherStage := range stages { - if otherStage.Name == name || fmt.Sprintf("%d", otherStage.Position) == name { + if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name { found = true break } @@ -521,9 +527,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image lastErr = err } } + cleanupStages = nil b.stagesLock.Unlock() - cleanupStages = nil // Clean up any builders that we used to get data from images. for _, builder := range b.containerMap { if err := builder.Delete(); err != nil { @@ -628,36 +634,53 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image Error error } - ch := make(chan Result) + ch := make(chan Result, len(stages)) if b.stagesSemaphore == nil { - jobs := int64(b.jobs) - if jobs < 0 { - return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer") - } else if jobs == 0 { - jobs = int64(len(stages)) - } - - b.stagesSemaphore = semaphore.NewWeighted(jobs) + b.stagesSemaphore = semaphore.NewWeighted(int64(len(stages))) } var wg sync.WaitGroup wg.Add(len(stages)) go func() { + cancel := false for stageIndex := range stages { index := stageIndex // Acquire the semaphore before creating the goroutine so we are sure they // run in the specified order. 
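The surrounding executor changes hand stage-level parallelism to a single weighted semaphore (options.JobSemaphore / b.stagesSemaphore) instead of a separate jobs counter, and acquire it before spawning each stage goroutine so stages start in submission order. A standalone sketch of that pattern with golang.org/x/sync/semaphore, assuming a fixed limit of two concurrent stages:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // at most two "stages" in flight at once
	var wg sync.WaitGroup

	for stage := 0; stage < 5; stage++ {
		// Acquire before starting the goroutine so stages begin in order.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			break
		}
		wg.Add(1)
		go func(stage int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("building stage", stage)
		}(stage)
	}
	wg.Wait()
}
```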
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil { + cancel = true b.lastError = err - return + ch <- Result{ + Index: index, + Error: err, + } + wg.Done() + continue } + b.stagesLock.Lock() + cleanupStages := cleanupStages + b.stagesLock.Unlock() go func() { defer b.stagesSemaphore.Release(1) defer wg.Done() + if cancel || cleanupStages == nil { + var err error + if stages[index].Name != strconv.Itoa(index) { + err = errors.Errorf("not building stage %d: build canceled", index) + } else { + err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name) + } + ch <- Result{ + Index: index, + Error: err, + } + return + } stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index) if stageErr != nil { + cancel = true ch <- Result{ Index: index, Error: stageErr, @@ -684,7 +707,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image b.stagesLock.Lock() b.terminatedStage[stage.Name] = r.Error - b.terminatedStage[fmt.Sprintf("%d", stage.Position)] = r.Error + b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error if r.Error != nil { b.stagesLock.Unlock() diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index ad0caed2877..06fed6a3b59 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -15,8 +15,11 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" buildahdocker "github.com/containers/buildah/docker" + "github.com/containers/buildah/internal" + "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/rusage" "github.com/containers/buildah/util" + config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" @@ -47,21 +50,22 @@ import ( // If we're naming the result of the build, only the last stage will apply that // name to the image that it produces. 
type StageExecutor struct { - ctx context.Context - executor *Executor - log func(format string, args ...interface{}) - index int - stages imagebuilder.Stages - name string - builder *buildah.Builder - preserved int - volumes imagebuilder.VolumeSet - volumeCache map[string]string - volumeCacheInfo map[string]os.FileInfo - mountPoint string - output string - containerIDs []string - stage *imagebuilder.Stage + ctx context.Context + executor *Executor + log func(format string, args ...interface{}) + index int + stages imagebuilder.Stages + name string + builder *buildah.Builder + preserved int + volumes imagebuilder.VolumeSet + volumeCache map[string]string + volumeCacheInfo map[string]os.FileInfo + mountPoint string + output string + containerIDs []string + stage *imagebuilder.Stage + argsFromContainerfile []string } // Preserve informs the stage executor that from this point on, it needs to @@ -401,6 +405,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err PreserveOwnership: preserveOwnership, ContextDir: contextDir, Excludes: copyExcludes, + IgnoreFile: s.executor.ignoreFile, IDMappingOptions: idMappingOptions, StripSetuidBit: stripSetuid, StripSetgidBit: stripSetgid, @@ -412,10 +417,67 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err return nil } +// Returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided +// Stage can automatically cleanup this mounts when a stage is removed +// check if RUN contains `--mount` with `from`. If yes pre-mount images or stages from executor for Run. +// stages mounted here will we used be Run(). +func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) { + stageMountPoints := make(map[string]internal.StageMountDetails) + for _, flag := range mountList { + if strings.Contains(flag, "from") { + arr := strings.SplitN(flag, ",", 2) + if len(arr) < 2 { + return nil, errors.Errorf("Invalid --mount command: %s", flag) + } + tokens := strings.Split(arr[1], ",") + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "from": + if len(kv) == 1 { + return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument") + } + if kv[1] == "" { + return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value") + } + from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments()) + if fromErr != nil { + return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1]) + } + // If the source's name corresponds to the + // result of an earlier stage, wait for that + // stage to finish being built. + if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil { + return nil, err + } + if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index { + stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint} + break + } else { + mountPoint, err := s.getImageRootfs(s.ctx, from) + if err != nil { + return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from) + } + stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint} + break + } + default: + continue + } + } + } + } + return stageMountPoints, nil +} + // Run executes a RUN instruction using the stage's current working container // as a root directory. 
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { logrus.Debugf("RUN %#v, %#v", run, config) + stageMountPoints, err := s.runStageMountPoints(run.Mounts) + if err != nil { + return err + } if s.builder == nil { return errors.Errorf("no build container available") } @@ -433,12 +495,14 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { Hostname: config.Hostname, Runtime: s.executor.runtime, Args: s.executor.runtimeArgs, + NoHosts: s.executor.noHosts, NoPivot: os.Getenv("BUILDAH_NOPIVOT") != "", Mounts: append([]Mount{}, s.executor.transientMounts...), Env: config.Env, User: config.User, WorkingDir: config.WorkingDir, Entrypoint: config.Entrypoint, + ContextDir: s.executor.contextDir, Cmd: config.Cmd, Stdin: stdin, Stdout: s.executor.out, @@ -449,6 +513,8 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { Secrets: s.executor.secrets, SSHSources: s.executor.sshsources, RunMounts: run.Mounts, + StageMountPoints: stageMountPoints, + SystemContext: s.executor.systemContext, } if config.NetworkDisabled { options.ConfigureNetwork = buildah.NetworkDisabled @@ -533,20 +599,38 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo } } + builderSystemContext := s.executor.systemContext + // get platform string from stage + if stage.Builder.Platform != "" { + os, arch, variant, err := parse.Platform(stage.Builder.Platform) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse platform %q", stage.Builder.Platform) + } + if arch != "" || variant != "" { + builderSystemContext.ArchitectureChoice = arch + builderSystemContext.VariantChoice = variant + } + if os != "" { + builderSystemContext.OSChoice = os + } + } + builderOptions := buildah.BuilderOptions{ Args: ib.Args, FromImage: from, PullPolicy: pullPolicy, + ContainerSuffix: s.executor.containerSuffix, Registry: s.executor.registry, BlobDirectory: s.executor.blobDirectory, SignaturePolicyPath: s.executor.signaturePolicyPath, ReportWriter: s.executor.reportWriter, - SystemContext: s.executor.systemContext, + SystemContext: builderSystemContext, Isolation: s.executor.isolation, NamespaceOptions: s.executor.namespaceOptions, ConfigureNetwork: s.executor.configureNetwork, CNIPluginPath: s.executor.cniPluginPath, CNIConfigDir: s.executor.cniConfigDir, + NetworkInterface: s.executor.networkInterface, IDMappingOptions: s.executor.idmappingOptions, CommonBuildOpts: s.executor.commonBuildOptions, DefaultMountsFilePath: s.executor.defaultMountsFilePath, @@ -556,6 +640,9 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo MaxPullRetries: s.executor.maxPullPushRetries, PullRetryDelay: s.executor.retryPullPushDelay, OciDecryptConfig: s.executor.ociDecryptConfig, + Logger: s.executor.logger, + ProcessLabel: s.executor.processLabel, + MountLabel: s.executor.mountLabel, } builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions) @@ -563,6 +650,16 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo return nil, errors.Wrapf(err, "error creating build container") } + // If executor's ProcessLabel and MountLabel is empty means this is the first stage + // Make sure we share first stage's ProcessLabel and MountLabel with all other subsequent stages + // Doing this will ensure and one stage in same build can mount another stage even if `selinux` + // is enabled. 
+ + if s.executor.mountLabel == "" && s.executor.processLabel == "" { + s.executor.mountLabel = builder.MountLabel + s.executor.processLabel = builder.ProcessLabel + } + if initializeIBConfig { volumes := map[string]struct{}{} for _, v := range builder.Volumes() { @@ -582,6 +679,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo Volumes: volumes, WorkingDir: builder.WorkDir(), Entrypoint: builder.Entrypoint(), + Healthcheck: (*docker.HealthConfig)(builder.Healthcheck()), Labels: builder.Labels(), Shell: builder.Shell(), StopSignal: builder.StopSignal(), @@ -673,8 +771,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, checkForLayers := s.executor.layers && s.executor.useCache moreStages := s.index < len(s.stages)-1 lastStage := !moreStages - imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)]) - rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)]) + imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)]) + rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)]) // If the base image's name corresponds to the result of an earlier // stage, make sure that stage has finished building an image, and @@ -971,7 +1069,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } - if cacheID != "" && !(s.executor.squash && lastInstruction) { + // We want to save history for other layers during a squashed build. + // Toggle flag allows executor to treat other instruction and layers + // as regular builds and only perform squashing at last + squashToggle := false + // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found. + // So only perform commit if its the lastInstruction of lastStage. + if cacheID != "" { logCacheHit(cacheID) // A suitable cached image was found, so we can just // reuse it. If we need to add a name to the resulting @@ -985,6 +1089,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } } else { + if s.executor.squash { + // We want to save history for other layers during a squashed build. + // squashToggle flag allows executor to treat other instruction and layers + // as regular builds and only perform squashing at last + s.executor.squash = false + squashToggle = true + } // We're not going to find any more cache hits, so we // can stop looking for them. 
checkForLayers = false @@ -996,6 +1107,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) } } + + // Perform final squash for this build as we are one the, + // last instruction of last stage + if (s.executor.squash || squashToggle) && lastInstruction && lastStage { + s.executor.squash = true + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName) + if err != nil { + return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step) + } + } + logImageID(imgID) // Update our working container to be based off of the cached @@ -1110,10 +1232,15 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri } switch strings.ToUpper(node.Value) { case "ARG": - buildArgs := s.getBuildArgs() + for _, variable := range strings.Fields(node.Original) { + if variable != "ARG" { + s.argsFromContainerfile = append(s.argsFromContainerfile, variable) + } + } + buildArgs := s.getBuildArgsKey() return "/bin/sh -c #(nop) ARG " + buildArgs case "RUN": - buildArgs := s.getBuildArgs() + buildArgs := s.getBuildArgsResolvedForRun() if buildArgs != "" { return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] } @@ -1131,10 +1258,71 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri // getBuildArgs returns a string of the build-args specified during the build process // it excludes any build-args that were not used in the build process -func (s *StageExecutor) getBuildArgs() string { - buildArgs := s.stage.Builder.Arguments() - sort.Strings(buildArgs) - return strings.Join(buildArgs, " ") +// values for args are overridden by the values specified using ENV. +// Reason: Values from ENV will always override values specified arg. +func (s *StageExecutor) getBuildArgsResolvedForRun() string { + var envs []string + configuredEnvs := make(map[string]string) + dockerConfig := s.stage.Builder.Config() + + for _, env := range dockerConfig.Env { + splitv := strings.SplitN(env, "=", 2) + if len(splitv) == 2 { + configuredEnvs[splitv[0]] = splitv[1] + } + } + + for key, value := range s.stage.Builder.Args { + if _, ok := s.stage.Builder.AllowedArgs[key]; ok { + // if value was in image it will be given higher priority + // so please embed that into build history + _, inImage := configuredEnvs[key] + if inImage { + envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key])) + } else { + // By default everything must be added to history. + // Following variable is configured to false only for special cases. + addToHistory := true + + // Following value is being assigned from build-args, + // check if this key belongs to any of the predefined allowlist args e.g Proxy Variables + // and if that arg is not manually set in Containerfile/Dockerfile + // then don't write its value to history. + // Following behaviour ensures parity with docker/buildkit. 
+ for _, variable := range config.ProxyEnv { + if key == variable { + // found in predefined args + // so don't add to history + // unless user did explicit `ARG ` + addToHistory = false + for _, processedArg := range s.argsFromContainerfile { + if key == processedArg { + addToHistory = true + } + } + } + } + if addToHistory { + envs = append(envs, fmt.Sprintf("%s=%s", key, value)) + } + } + } + } + sort.Strings(envs) + return strings.Join(envs, " ") +} + +// getBuildArgs key returns set args are key which were specified during the build process +// following function will be exclusively used by build history +func (s *StageExecutor) getBuildArgsKey() string { + var envs []string + for key := range s.stage.Builder.Args { + if _, ok := s.stage.Builder.AllowedArgs[key]; ok { + envs = append(envs, key) + } + } + sort.Strings(envs) + return strings.Join(envs, " ") } // tagExistingImage adds names to an image already in the store @@ -1364,6 +1552,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer RetryDelay: s.executor.retryPullPushDelay, HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, + UnsetEnvs: s.executor.unsetEnvs, } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { @@ -1381,5 +1570,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } func (s *StageExecutor) EnsureContainerPath(path string) error { - return copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{}) + return s.builder.EnsureContainerPathAs(path, "", nil) +} + +func (s *StageExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return s.builder.EnsureContainerPathAs(path, user, mode) } diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 0029a95e27b..a4fcee47686 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -83,6 +83,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system return nil, err } + netInt, err := getNetworkInterface(store, "", "") + if err != nil { + return nil, err + } + builder := &Builder{ store: store, Type: containerType, @@ -100,6 +105,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system UIDMap: uidmap, GIDMap: gidmap, }, + NetworkInterface: netInt, } if err := builder.initConfig(ctx, image, systemContext); err != nil { diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index f0bf92ddf33..12b69c9ba60 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -11,10 +11,12 @@ import ( "strings" "time" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/util" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -43,8 +45,10 @@ func Info(store storage.Store) ([]InfoData, error) { func hostInfo() map[string]interface{} { info := map[string]interface{}{} - info["os"] = runtime.GOOS - info["arch"] = runtime.GOARCH + ps := platforms.Normalize(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH}) + info["os"] = ps.OS + info["arch"] = ps.Architecture + info["variant"] = ps.Variant info["cpus"] = runtime.NumCPU() info["rootless"] = 
unshare.IsRootless() diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index 2a09821f3c1..02a81be6fb8 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -20,32 +20,6 @@ lags the upstream release. sudo yum -y install buildah ``` -The [Kubic project](https://build.opensuse.org/project/show/devel:kubic:libcontainers:stable) -provides updated packages for CentOS 8 and CentOS 8 Stream. - -```bash -# CentOS 8 -sudo dnf -y module disable container-tools -sudo dnf -y install 'dnf-command(copr)' -sudo dnf -y copr enable rhcontainerbot/container-selinux -sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8/devel:kubic:libcontainers:stable.repo -# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc -sudo dnf -y --refresh install runc -# Install Buildah -sudo dnf -y --refresh install buildah - -# CentOS 8 Stream -sudo dnf -y module disable container-tools -sudo dnf -y install 'dnf-command(copr)' -sudo dnf -y copr enable rhcontainerbot/container-selinux -sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_8_Stream/devel:kubic:libcontainers:stable.repo -# OPTIONAL FOR RUNC USERS: crun will be installed by default. Install runc first if you prefer runc -sudo dnf -y --refresh install runc -# Install Buildah -sudo dnf -y --refresh install buildah -``` - - #### [Debian](https://debian.org) The buildah package is available in @@ -127,26 +101,6 @@ sudo apt-get -y update sudo apt-get -y install buildah ``` -If you would prefer newer (though not as well-tested) packages, -the [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/buildah) -provides packages for active Ubuntu releases 20.04 and newer (it should also work with direct derivatives like Pop!\_OS). -The packages in Kubic project repos are more frequently updated than the one in Ubuntu's official repositories, due to how Debian/Ubuntu works. -Checkout the Kubic project page for a list of supported Ubuntu version and architecture combinations. -The build sources for the Kubic packages can be found [here](https://gitlab.com/rhcontainerbot/buildah/-/tree/debian/debian). - -CAUTION: On Ubuntu 20.10 and newer, we highly recommend you use Buildah, Podman and Skopeo ONLY from EITHER the Kubic repo -OR the official Ubuntu repos. Mixing and matching may lead to unpredictable situations including installation conflicts. - - -```bash -. 
/etc/os-release -sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${ID^}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" -wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${ID^}_${VERSION_ID}/Release.key -O Release.key -sudo apt-key add - < Release.key -sudo apt-get update -qq -sudo apt-get -qq -y install buildah -``` - # Building from scratch ## System Requirements @@ -254,9 +208,7 @@ Then to install Buildah on Fedora follow the steps in this example: ### RHEL, CentOS -In RHEL and CentOS 7, ensure that you are subscribed to the `rhel-7-server-rpms`, -`rhel-7-server-extras-rpms`, `rhel-7-server-optional-rpms` and `EPEL` repositories, then -run this command: +In RHEL and CentOS, run this command to install the build dependencies: ``` yum -y install \ @@ -278,11 +230,6 @@ run this command: The build steps for Buildah on RHEL or CentOS are the same as for Fedora, above. -*NOTE:* Buildah on RHEL or CentOS version 7.* is not supported running as non-root due to -these systems not having newuidmap or newgidmap installed. It is possible to pull -the shadow-utils source RPM from Fedora 29 and build and install from that in order to -run Buildah as non-root on these systems. - ### openSUSE On openSUSE Tumbleweed, install go via `zypper in go`, then run this command: diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go new file mode 100644 index 00000000000..832b2b9abae --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/parse/parse.go @@ -0,0 +1,608 @@ +package parse + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/buildah/internal" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/lockfile" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +const ( + // TypeBind is the type for mounting host dir + TypeBind = "bind" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // TypeCache is the type for mounting a common persistent cache from host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. + // Lifecycle of following directory will be inherited from how host machine treats temporary directory + BuildahCacheDir = "buildah-cache" + // mount=type=cache allows users to lock a cache store while its being used by another build + BuildahCacheLockfile = "buildah-cache-lockfile" +) + +var ( + errBadMntOption = errors.New("invalid mount option") + errBadOptionArg = errors.New("must provide an argument for option") + errBadVolDest = errors.New("must set volume destination") + errBadVolSrc = errors.New("must set volume source") + errDuplicateDest = errors.Errorf("duplicate mount destination") +) + +// GetBindMount parses a single bind mount entry from the --mount flag. +// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. 
+// Caller is expected to perform unmount of any mounted images +func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) { + newMount := specs.Mount{ + Type: TypeBind, + } + + mountReadability := false + setDest := false + bindNonRecursive := false + fromImage := "" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "bind-nonrecursive": + newMount.Options = append(newMount.Options, "bind") + bindNonRecursive = true + case "ro", "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?) + newMount.Options = append(newMount.Options, kv[0]) + mountReadability = true + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + mountReadability = true + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + mountReadability = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + case "from": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + fromImage = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, kv[1]) + case "src", "source": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Source = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, "", errors.Wrapf(errBadOptionArg, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, "", err + } + newMount.Destination = kv[1] + setDest = true + case "consistency": + // Option for OS X only, has no meaning on other platforms + // and can thus be safely ignored. 
+ // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts + default: + return newMount, "", errors.Wrapf(errBadMntOption, kv[0]) + } + } + + // default mount readability is always readonly + if !mountReadability { + newMount.Options = append(newMount.Options, "ro") + } + + // Following variable ensures that we return imagename only if we did additional mount + isImageMounted := false + if fromImage != "" { + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromImage]; ok { + mountPoint = val.MountPoint + } + } + // if mountPoint of image was not found in additionalMap + // or additionalMap was nil, try mounting image + if mountPoint == "" { + image, err := internalUtil.LookupImage(ctx, store, fromImage) + if err != nil { + return newMount, "", err + } + + mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) + if err != nil { + return newMount, "", err + } + isImageMounted = true + } + contextDir = mountPoint + } + + // buildkit parity: default bind option must be `rbind` + // unless specified + if !bindNonRecursive { + newMount.Options = append(newMount.Options, "rbind") + } + + if !setDest { + return newMount, fromImage, errBadVolDest + } + + // buildkit parity: support absolute path for sources from current build context + if contextDir != "" { + // path should be /contextDir/specified path + newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // looks like its coming from `build run --mount=type=bind` allow using absolute path + // error out if no source is set + if newMount.Source == "" { + return newMount, "", errBadVolSrc + } + if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { + return newMount, "", err + } + } + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, fromImage, err + } + newMount.Options = opts + + if !isImageMounted { + // we don't want any cleanups if image was not mounted explicitly + // so dont return anything + fromImage = "" + } + + return newMount, fromImage, nil +} + +// GetCacheMount parses a single cache mount entry from the --mount flag. +func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, []string, error) { + var err error + var mode uint64 + lockedTargets := make([]string, 0) + var ( + setDest bool + setShared bool + setReadOnly bool + ) + fromStage := "" + newMount := specs.Mount{ + Type: TypeBind, + } + // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id + id := "" + //buidkit parity: cache directory defaults to 755 + mode = 0o755 + //buidkit parity: cache directory defaults to uid 0 if not specified + uid := 0 + //buidkit parity: cache directory defaults to gid 0 if not specified + gid := 0 + // sharing mode + sharing := "shared" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?) 
+ newMount.Options = append(newMount.Options, kv[0]) + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + case "readonly", "ro": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + setReadOnly = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + setShared = true + case "sharing": + sharing = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, kv[1]) + case "id": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + id = kv[1] + case "from": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + fromStage = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, lockedTargets, err + } + newMount.Destination = kv[1] + setDest = true + case "src", "source": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Source = kv[1] + case "mode": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + mode, err = strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache mode") + } + case "uid": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + uid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache uid") + } + case "gid": + if len(kv) == 1 { + return newMount, lockedTargets, errors.Wrapf(errBadOptionArg, kv[0]) + } + gid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to parse cache gid") + } + default: + return newMount, lockedTargets, errors.Wrapf(errBadMntOption, kv[0]) + } + } + + if !setDest { + return newMount, lockedTargets, errBadVolDest + } + + if fromStage != "" { + // do not create cache on host + // instead use read-only mounted stage as cache + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromStage]; ok { + if val.IsStage { + mountPoint = val.MountPoint + } + } + } + // Cache does not supports using image so if not stage found + // return with error + if mountPoint == "" { + return newMount, lockedTargets, fmt.Errorf("no stage found with name %s", fromStage) + } + // path should be /contextDir/specified path + newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // we need to create cache on host if no image is being used + + // since type is cache and cache can be reused by consecutive builds + // create a common cache directory, which persists on hosts within temp lifecycle + // add subdirectory if specified + + // cache parent directory + cacheParent := filepath.Join(getTempDir(), BuildahCacheDir) + // create cache on host if not present + err = os.MkdirAll(cacheParent, os.FileMode(0755)) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to create build cache directory") + } + + if id != "" { + newMount.Source = filepath.Join(cacheParent, filepath.Clean(id)) + } else { + newMount.Source = filepath.Join(cacheParent, 
filepath.Clean(newMount.Destination)) + } + idPair := idtools.IDPair{ + UID: uid, + GID: gid, + } + //buildkit parity: change uid and gid if specified otheriwise keep `0` + err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to change uid,gid of cache directory") + } + } + + switch sharing { + case "locked": + // lock parent cache + lockfile, err := lockfile.GetLockfile(filepath.Join(newMount.Source, BuildahCacheLockfile)) + if err != nil { + return newMount, lockedTargets, errors.Wrapf(err, "Unable to acquire lock when sharing mode is locked") + } + // Will be unlocked after the RUN step is executed. + lockfile.Lock() + lockedTargets = append(lockedTargets, filepath.Join(newMount.Source, BuildahCacheLockfile)) + case "shared": + // do nothing since default is `shared` + break + default: + // error out for unknown values + return newMount, lockedTargets, errors.Wrapf(err, "Unrecognized value %q for field `sharing`", sharing) + } + + // buildkit parity: default sharing should be shared + // unless specified + if !setShared { + newMount.Options = append(newMount.Options, "shared") + } + + // buildkit parity: cache must writable unless `ro` or `readonly` is configured explicitly + if !setReadOnly { + newMount.Options = append(newMount.Options, "rw") + } + + newMount.Options = append(newMount.Options, "bind") + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, lockedTargets, err + } + newMount.Options = opts + + return newMount, lockedTargets, nil +} + +// ValidateVolumeMountHostDir validates the host path of buildah --volume +func ValidateVolumeMountHostDir(hostDir string) error { + if !filepath.IsAbs(hostDir) { + return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) + } + if _, err := os.Stat(hostDir); err != nil { + return errors.WithStack(err) + } + return nil +} + +// RevertEscapedColon converts "\:" to ":" +func RevertEscapedColon(source string) string { + return strings.ReplaceAll(source, "\\:", ":") +} + +// SplitStringWithColonEscape splits string into slice by colon. Backslash-escaped colon (i.e. 
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + result := make([]string, 0, 3) + sb := &strings.Builder{} + for idx, r := range str { + if r == ':' { + // the colon is backslash-escaped + if idx-1 > 0 && str[idx-1] == '\\' { + sb.WriteRune(r) + } else { + // os.Stat will fail if path contains escaped colon + result = append(result, RevertEscapedColon(sb.String())) + sb.Reset() + } + } else { + sb.WriteRune(r) + } + } + if sb.Len() > 0 { + result = append(result, RevertEscapedColon(sb.String())) + } + return result +} + +func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { + finalVolumeMounts := make(map[string]specs.Mount) + + for _, volume := range volumes { + volumeMount, err := Volume(volume) + if err != nil { + return nil, err + } + if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { + return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) + } + finalVolumeMounts[volumeMount.Destination] = volumeMount + } + return finalVolumeMounts, nil +} + +// Volume parses the input of --volume +func Volume(volume string) (specs.Mount, error) { + mount := specs.Mount{} + arr := SplitStringWithColonEscape(volume) + if len(arr) < 2 { + return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) + } + if err := ValidateVolumeMountHostDir(arr[0]); err != nil { + return mount, err + } + if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { + return mount, err + } + mountOptions := "" + if len(arr) > 2 { + mountOptions = arr[2] + if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { + return mount, err + } + } + mountOpts := strings.Split(mountOptions, ",") + mount.Source = arr[0] + mount.Destination = arr[1] + mount.Type = "rbind" + mount.Options = mountOpts + return mount, nil +} + +// GetVolumes gets the volumes from --volume and --mount +func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, []string, error) { + unifiedMounts, mountedImages, lockedTargets, err := getMounts(ctx, store, mounts, contextDir) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + volumeMounts, err := getVolumeMounts(volumes) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, dest) + } + unifiedMounts[dest] = mount + } + + finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + finalMounts = append(finalMounts, mount) + } + return finalMounts, mountedImages, lockedTargets, nil +} + +// getMounts takes user-provided input from the --mount flag and creates OCI +// spec mounts. +// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=tmpfs,target=/dev/shm ... 
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []string, error) { + finalMounts := make(map[string]specs.Mount) + mountedImages := make([]string, 0) + lockedTargets := make([]string, 0) + + errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=,[src=,]target=[,options]") + + // TODO(vrothberg): the manual parsing can be replaced with a regular expression + // to allow a more robust parsing of the mount format and to give + // precise errors regarding supported format versus supported options. + for _, mount := range mounts { + arr := strings.SplitN(mount, ",", 2) + if len(arr) < 2 { + return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount) + } + kv := strings.Split(arr[0], "=") + // TODO: type is not explicitly required in Docker. + // If not specified, it defaults to "volume". + if len(kv) != 2 || kv[0] != "type" { + return nil, mountedImages, lockedTargets, errors.Wrapf(errInvalidSyntax, "%q", mount) + } + + tokens := strings.Split(arr[1], ",") + switch kv[1] { + case TypeBind: + mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + mountedImages = append(mountedImages, image) + case TypeCache: + mount, lockedPaths, err := GetCacheMount(tokens, store, "", nil) + lockedTargets = lockedPaths + if err != nil { + return nil, mountedImages, lockedTargets, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + case TypeTmpfs: + mount, err := GetTmpfsMount(tokens) + if err != nil { + return nil, mountedImages, lockedTargets, err + } + if _, ok := finalMounts[mount.Destination]; ok { + return nil, mountedImages, lockedTargets, errors.Wrapf(errDuplicateDest, mount.Destination) + } + finalMounts[mount.Destination] = mount + default: + return nil, mountedImages, lockedTargets, errors.Errorf("invalid filesystem type %q", kv[1]) + } + } + + return finalMounts, mountedImages, lockedTargets, nil +} + +// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag +func GetTmpfsMount(args []string) (specs.Mount, error) { + newMount := specs.Mount{ + Type: TypeTmpfs, + Source: TypeTmpfs, + } + + setDest := false + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "ro", "nosuid", "nodev", "noexec": + newMount.Options = append(newMount.Options, kv[0]) + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + case "tmpcopyup": + //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. 
+ newMount.Options = append(newMount.Options, kv[0]) + case "tmpfs-mode": + if len(kv) == 1 { + return newMount, errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) + case "tmpfs-size": + if len(kv) == 1 { + return newMount, errors.Wrapf(errBadOptionArg, kv[0]) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) + case "src", "source": + return newMount, errors.Errorf("source is not supported with tmpfs mounts") + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, errors.Wrapf(errBadOptionArg, kv[0]) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, err + } + newMount.Destination = kv[1] + setDest = true + default: + return newMount, errors.Wrapf(errBadMntOption, kv[0]) + } + } + + if !setDest { + return newMount, errBadVolDest + } + + return newMount, nil +} + +/* This is internal function and could be changed at any time */ +/* for external usage please refer to buildah/pkg/parse.GetTempDir() */ +func getTempDir() string { + if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { + return tmpdir + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/buildah/internal/types.go b/vendor/github.com/containers/buildah/internal/types.go new file mode 100644 index 00000000000..8ddff99fb75 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/types.go @@ -0,0 +1,11 @@ +package internal + +// Types is internal packages are suspected to change with releases avoid using these outside of buildah + +// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor +// StageExecutor has ability to mount stages/images in current context and +// automatically clean them up. +type StageMountDetails struct { + IsStage bool // tells if mountpoint returned from stage executor is stage or image + MountPoint string // mountpoint of stage/image +} diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go new file mode 100644 index 00000000000..cce50816740 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -0,0 +1,24 @@ +package util + +import ( + "github.com/containers/common/libimage" + "github.com/containers/image/v5/types" + "github.com/containers/storage" +) + +// LookupImage returns *Image to corresponding imagename or id +func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) { + systemContext := ctx + if systemContext == nil { + systemContext = &types.SystemContext{} + } + runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext}) + if err != nil { + return nil, err + } + localImage, _, err := runtime.LookupImage(image, nil) + if err != nil { + return nil, err + } + return localImage, nil +} diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go index 85a0f0b31b0..c7e330c1302 100644 --- a/vendor/github.com/containers/buildah/new.go +++ b/vendor/github.com/containers/buildah/new.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/containers/buildah/define" - "github.com/containers/buildah/pkg/blobcache" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/image" @@ -113,6 +112,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions options.FromImage = "" } + if options.NetworkInterface == nil 
{ + // create the network interface + // Note: It is important to do this before we pull any images/create containers. + // The default backend detection logic needs an empty store to correctly detect + // that we can use netavark, if the store was not empty it will use CNI to not break existing installs. + options.NetworkInterface, err = getNetworkInterface(store, options.CNIConfigDir, options.CNIPluginPath) + if err != nil { + return nil, err + } + } + systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath) if options.FromImage != "" && options.FromImage != "scratch" { @@ -134,14 +144,11 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions pullOptions.OciDecryptConfig = options.OciDecryptConfig pullOptions.SignaturePolicyPath = options.SignaturePolicyPath pullOptions.Writer = options.ReportWriter + pullOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) maxRetries := uint(options.MaxPullRetries) pullOptions.MaxRetries = &maxRetries - if options.BlobDirectory != "" { - pullOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) - } - pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions) if err != nil { return nil, err @@ -197,6 +204,9 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions } name := "working-container" + if options.ContainerSuffix != "" { + name = options.ContainerSuffix + } if options.Container != "" { name = options.Container } else { @@ -216,9 +226,20 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions conflict := 100 for { + + var flags map[string]interface{} + // check if we have predefined ProcessLabel and MountLabel + // this could be true if this is another stage in a build + if options.ProcessLabel != "" && options.MountLabel != "" { + flags = map[string]interface{}{ + "ProcessLabel": options.ProcessLabel, + "MountLabel": options.MountLabel, + } + } coptions := storage.ContainerOptions{ LabelOpts: options.CommonBuildOpts.LabelOpts, IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions), + Flags: flags, Volatile: true, } container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions) @@ -283,13 +304,15 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions UIDMap: uidmap, GIDMap: gidmap, }, - Capabilities: copyStringSlice(options.Capabilities), - CommonBuildOpts: options.CommonBuildOpts, - TopLayer: topLayer, - Args: options.Args, - Format: options.Format, - TempVolumes: map[string]bool{}, - Devices: options.Devices, + Capabilities: copyStringSlice(options.Capabilities), + CommonBuildOpts: options.CommonBuildOpts, + TopLayer: topLayer, + Args: options.Args, + Format: options.Format, + TempVolumes: map[string]bool{}, + Devices: options.Devices, + Logger: options.Logger, + NetworkInterface: options.NetworkInterface, } if options.Mount { diff --git a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go index 8dadec13084..fa60619ac62 100644 --- a/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go +++ b/vendor/github.com/containers/buildah/pkg/blobcache/blobcache.go @@ -1,39 +1,8 @@ package blobcache import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - - 
"github.com/containers/buildah/docker" - "github.com/containers/common/libimage" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/compression" - "github.com/containers/image/v5/transports" + imageBlobCache "github.com/containers/image/v5/pkg/blobcache" "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" - digest "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - _ types.ImageReference = &blobCacheReference{} - _ types.ImageSource = &blobCacheSource{} - _ types.ImageDestination = &blobCacheDestination{} -) - -const ( - compressedNote = ".compressed" - decompressedNote = ".decompressed" ) // BlobCache is an object which saves copies of blobs that are written to it while passing them @@ -52,517 +21,11 @@ type BlobCache interface { ClearCache() error } -type blobCacheReference struct { - reference types.ImageReference - // WARNING: The contents of this directory may be accessed concurrently, - // both within this process and by multiple different processes - directory string - compress types.LayerCompression -} - -type blobCacheSource struct { - reference *blobCacheReference - source types.ImageSource - sys types.SystemContext - // this mutex synchronizes the counters below - mu sync.Mutex - cacheHits int64 - cacheMisses int64 - cacheErrors int64 -} - -type blobCacheDestination struct { - reference *blobCacheReference - destination types.ImageDestination -} - -func makeFilename(blobSum digest.Digest, isConfig bool) string { - if isConfig { - return blobSum.String() + ".config" - } - return blobSum.String() -} - -// CacheLookupReferenceFunc wraps a BlobCache into a -// libimage.LookupReferenceFunc to allow for using a BlobCache during -// image-copy operations. -func CacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { - // NOTE: this prevents us from moving BlobCache around and generalizes - // the libimage API. - return func(ref types.ImageReference) (types.ImageReference, error) { - ref, err := NewBlobCache(ref, directory, compress) - if err != nil { - return nil, errors.Wrapf(err, "error using blobcache %q", directory) - } - return ref, nil - } -} - // NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are // written to the destination image created from the resulting reference will also be stored -// as-is to the specified directory or a temporary directory. The cache directory's contents -// can be cleared by calling the returned BlobCache()'s ClearCache() method. +// as-is to the specified directory or a temporary directory. // The compress argument controls whether or not the cache will try to substitute a compressed // or different version of a blob when preparing the list of layers when reading an image. 
func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (BlobCache, error) { - if directory == "" { - return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) - } - switch compress { - case types.Compress, types.Decompress, types.PreserveOriginal: - // valid value, accept it - default: - return nil, errors.Errorf("unhandled LayerCompression value %v", compress) - } - return &blobCacheReference{ - reference: ref, - directory: directory, - compress: compress, - }, nil -} - -func (r *blobCacheReference) Transport() types.ImageTransport { - return r.reference.Transport() -} - -func (r *blobCacheReference) StringWithinTransport() string { - return r.reference.StringWithinTransport() -} - -func (r *blobCacheReference) DockerReference() reference.Named { - return r.reference.DockerReference() -} - -func (r *blobCacheReference) PolicyConfigurationIdentity() string { - return r.reference.PolicyConfigurationIdentity() -} - -func (r *blobCacheReference) PolicyConfigurationNamespaces() []string { - return r.reference.PolicyConfigurationNamespaces() -} - -func (r *blobCacheReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return r.reference.DeleteImage(ctx, sys) -} - -func (r *blobCacheReference) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { - if blobinfo.Digest == "" { - return false, -1, nil - } - - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(r.directory, makeFilename(blobinfo.Digest, isConfig)) - fileInfo, err := os.Stat(filename) - if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { - return true, fileInfo.Size(), nil - } - if !os.IsNotExist(err) { - return false, -1, errors.Wrap(err, "checking size") - } - } - - return false, -1, nil -} - -func (r *blobCacheReference) Directory() string { - return r.directory -} - -func (r *blobCacheReference) ClearCache() error { - f, err := os.Open(r.directory) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - names, err := f.Readdirnames(-1) - if err != nil { - return errors.Wrapf(err, "error reading directory %q", r.directory) - } - for _, name := range names { - pathname := filepath.Join(r.directory, name) - if err = os.RemoveAll(pathname); err != nil { - return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(r)) - } - } - return nil -} - -func (r *blobCacheReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(r.reference)) - } - return image.FromSource(ctx, sys, src) -} - -func (r *blobCacheReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - src, err := r.reference.NewImageSource(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(r.reference)) - } - logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(r.reference), r.directory, r.compress) - return &blobCacheSource{reference: r, source: src, sys: *sys}, nil -} - -func (r *blobCacheReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - dest, err := r.reference.NewImageDestination(ctx, sys) - if err != nil { - return nil, errors.Wrapf(err, "error creating new image 
destination %q", transports.ImageName(r.reference)) - } - logrus.Debugf("starting to write to image %q using blob cache in %q", transports.ImageName(r.reference), r.directory) - return &blobCacheDestination{reference: r, destination: dest}, nil -} - -func (s *blobCacheSource) Reference() types.ImageReference { - return s.reference -} - -func (s *blobCacheSource) Close() error { - logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) - return s.source.Close() -} - -func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) - manifestBytes, err := ioutil.ReadFile(filename) - if err == nil { - s.cacheHits++ - return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil - } - if !os.IsNotExist(err) { - s.cacheErrors++ - return nil, "", errors.Wrap(err, "checking for manifest file") - } - } - s.cacheMisses++ - return s.source.GetManifest(ctx, instanceDigest) -} - -func (s *blobCacheSource) HasThreadSafeGetBlob() bool { - return s.source.HasThreadSafeGetBlob() -} - -func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - present, size, err := s.reference.HasBlob(blobinfo) - if err != nil { - return nil, -1, err - } - if present { - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - s.mu.Lock() - s.cacheHits++ - s.mu.Unlock() - return f, size, nil - } - if !os.IsNotExist(err) { - s.mu.Lock() - s.cacheErrors++ - s.mu.Unlock() - return nil, -1, errors.Wrap(err, "checking for cache") - } - } - } - s.mu.Lock() - s.cacheMisses++ - s.mu.Unlock() - rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) - if err != nil { - return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) - } - return rc, size, nil -} - -func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.source.GetSignatures(ctx, instanceDigest) -} - -func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { - signatures, err := s.source.GetSignatures(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) - } - canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) - - infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) - if err != nil { - return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - if infos == nil { - img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) - if err != nil { - return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) - } - infos = img.LayerInfos() - } - - if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { - replacedInfos := make([]types.BlobInfo, 0, len(infos)) - for _, info := range infos { - var replaceDigest []byte - var err error - blobFile := 
filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) - alternate := "" - switch s.reference.compress { - case types.Compress: - alternate = blobFile + compressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - case types.Decompress: - alternate = blobFile + decompressedNote - replaceDigest, err = ioutil.ReadFile(alternate) - } - if err == nil && digest.Digest(replaceDigest).Validate() == nil { - alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) - fileInfo, err := os.Stat(alternate) - if err == nil { - switch info.MediaType { - case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: - switch s.reference.compress { - case types.Compress: - info.MediaType = v1.MediaTypeImageLayerGzip - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - info.MediaType = v1.MediaTypeImageLayer - info.CompressionAlgorithm = nil - } - case docker.V2S2MediaTypeUncompressedLayer, manifest.DockerV2Schema2LayerMediaType: - switch s.reference.compress { - case types.Compress: - info.MediaType = manifest.DockerV2Schema2LayerMediaType - info.CompressionAlgorithm = &compression.Gzip - case types.Decompress: - // nope, not going to suggest anything, it's not allowed by the spec - replacedInfos = append(replacedInfos, info) - continue - } - } - logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) - info.CompressionOperation = s.reference.compress - info.Digest = digest.Digest(replaceDigest) - info.Size = fileInfo.Size() - logrus.Debugf("info = %#v", info) - } - } - replacedInfos = append(replacedInfos, info) - } - infos = replacedInfos - } - - return infos, nil -} - -func (d *blobCacheDestination) Reference() types.ImageReference { - return d.reference -} - -func (d *blobCacheDestination) Close() error { - logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) - return d.destination.Close() -} - -func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { - return d.destination.SupportedManifestMIMETypes() -} - -func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { - return d.destination.SupportsSignatures(ctx) -} - -func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { - return d.destination.DesiredLayerCompression() -} - -func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { - return d.destination.AcceptsForeignLayerURLs() -} - -func (d *blobCacheDestination) MustMatchRuntimeOS() bool { - return d.destination.MustMatchRuntimeOS() -} - -func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { - return d.destination.IgnoresEmbeddedDockerReference() -} - -// Decompress and save the contents of the decompressReader stream into the passed-in temporary -// file. If we successfully save all of the data, rename the file to match the digest of the data, -// and make notes about the relationship between the file that holds a copy of the compressed data -// and this new file. -func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { - defer wg.Done() - // Decompress from and digest the reading end of that pipe. 
- decompressed, err3 := archive.DecompressStream(decompressReader) - digester := digest.Canonical.Digester() - if err3 == nil { - // Read the decompressed data through the filter over the pipe, blocking until the - // writing end is closed. - _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) - } else { - // Drain the pipe to keep from stalling the PutBlob() thread. - if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil { - logrus.Debugf("error draining the pipe: %v", err) - } - } - decompressReader.Close() - decompressed.Close() - tempFile.Close() - // Determine the name that we should give to the uncompressed copy of the blob. - decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) - if err3 == nil { - // Rename the temporary file. - if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { - logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) - // Remove the temporary file. - if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } else { - *alternateDigest = digester.Digest() - // Note the relationship between the two files. - if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { - logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) - } - if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { - logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) - } - } - } else { - // Remove the temporary file. 
- if err3 = os.Remove(tempFile.Name()); err3 != nil { - logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) - } - } -} - -func (d *blobCacheDestination) HasThreadSafePutBlob() bool { - return d.destination.HasThreadSafePutBlob() -} - -func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - var tempfile *os.File - var err error - var n int - var alternateDigest digest.Digest - var closer io.Closer - wg := new(sync.WaitGroup) - needToWait := false - compression := archive.Uncompressed - if inputInfo.Digest != "" { - filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err == nil { - stream = io.TeeReader(stream, tempfile) - defer func() { - if err == nil { - if err = os.Rename(tempfile.Name(), filename); err != nil { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) - } - } else { - if err2 := os.Remove(tempfile.Name()); err2 != nil { - logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) - } - } - tempfile.Close() - }() - } else { - logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) - } - if !isConfig { - initial := make([]byte, 8) - n, err = stream.Read(initial) - if n > 0 { - // Build a Reader that will still return the bytes that we just - // read, for PutBlob()'s sake. - stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) - if n >= len(initial) { - compression = archive.DetectCompression(initial[:n]) - } - if compression == archive.Gzip { - // The stream is compressed, so create a file which we'll - // use to store a decompressed copy. - decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) - if err2 != nil { - logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) - decompressedTemp.Close() - } else { - // Write a copy of the compressed data to a pipe, - // closing the writing end of the pipe after - // PutBlob() returns. - decompressReader, decompressWriter := io.Pipe() - closer = decompressWriter - stream = io.TeeReader(stream, decompressWriter) - // Let saveStream() close the reading end and handle the temporary file. 
- wg.Add(1) - needToWait = true - go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) - } - } - } - } - } - newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) - if closer != nil { - closer.Close() - } - if needToWait { - wg.Wait() - } - if err != nil { - return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) - } - if alternateDigest.Validate() == nil { - logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) - } else { - logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) - } - return newBlobInfo, nil -} - -func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) - if err != nil || present { - return present, reusedInfo, err - } - - for _, isConfig := range []bool{false, true} { - filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) - f, err := os.Open(filename) - if err == nil { - defer f.Close() - uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) - if err != nil { - return false, types.BlobInfo{}, err - } - return true, uploadedInfo, nil - } - } - - return false, types.BlobInfo{}, nil -} - -func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { - manifestDigest, err := manifest.Digest(manifestBytes) - if err != nil { - logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) - } else { - filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) - if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { - logrus.Warnf("error saving manifest as %q: %v", filename, err) - } - } - return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) -} - -func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - return d.destination.PutSignatures(ctx, signatures, instanceDigest) -} - -func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return d.destination.Commit(ctx, unparsedToplevel) + return imageBlobCache.NewBlobCache(ref, directory, compress) } diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 8ee4ab6d1f6..c325bc5cfb3 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -18,6 +18,40 @@ import ( "golang.org/x/sys/unix" ) +// Options type holds various configuration options for overlay +// MountWithOptions accepts following type so it is easier to specify +// more verbose configuration for overlay mount. +type Options struct { + // The Upper directory is normally writable layer in an overlay mount. + // Note!! : Following API does not handles escaping or validates correctness of the values + // passed to UpperDirOptionFragment instead API will try to pass values as is it + // to the `mount` command. It is user's responsibility to make sure they pre-validate + // these values. 
Invalid inputs may lead to undefined behviour. + // This is provided as-is, use it if it works for you, we can/will change/break that in the future. + // See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 + // TODO: Should we address above comment and handle escaping of metacharacters like + // `comma`, `backslash` ,`colon` and any other special characters + UpperDirOptionFragment string + // The Workdir is used to prepare files as they are switched between the layers. + // Note!! : Following API does not handles escaping or validates correctness of the values + // passed to WorkDirOptionFragment instead API will try to pass values as is it + // to the `mount` command. It is user's responsibility to make sure they pre-validate + // these values. Invalid inputs may lead to undefined behviour. + // This is provided as-is, use it if it works for you, we can/will change/break that in the future. + // See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959 + // TODO: Should we address above comment and handle escaping of metacharacters like + // `comma`, `backslash` ,`colon` and any other special characters + WorkDirOptionFragment string + // Graph options relayed from podman, will be responsible for choosing mount program + GraphOpts []string + // Mark if following overlay is read only + ReadOnly bool + // RootUID is not used yet but keeping it here for legacy reasons. + RootUID int + // RootGID is not used yet but keeping it here for legacy reasons. + RootGID int +} + // TempDir generates an overlay Temp directory in the container content func TempDir(containerDir string, rootUID, rootGID int) (string, error) { contentDir := filepath.Join(containerDir, "overlay") @@ -65,7 +99,8 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string // from the source system. It then mounts up the source directory on to the // generated mount point and returns the mount point to the caller. func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) { - return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, false) + overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID} + return MountWithOptions(contentDir, source, dest, &overlayOpts) } // MountReadOnly creates a subdir of the contentDir based on the source directory @@ -73,26 +108,71 @@ func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions [ // generated mount point and returns the mount point to the caller. Note that no // upper layer will be created rendering it a read-only mount func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) { - return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, true) + overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID} + return MountWithOptions(contentDir, source, dest, &overlayOpts) } -// NOTE: rootUID and rootUID are not yet used. -func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []string, readOnly bool) (mount specs.Mount, Err error) { +// findMountProgram finds if any mount program is specified in the graph options. 
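A minimal usage sketch of the MountWithOptions API vendored above, for orientation while reviewing: TempDir, MountWithOptions, Options, and RemoveTemp are the identifiers shown in this overlay.go; the container directory, source/destination paths, and the fuse-overlayfs graph option below are hypothetical, and on a real Linux host this performs an actual overlay (or mount-program) mount, so it needs suitable privileges.

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/overlay"
)

func main() {
	// Hypothetical container directory; TempDir creates the overlay
	// scaffolding (upper/work/merge) underneath it.
	contentDir, err := overlay.TempDir("/var/lib/containers/example-container", 0, 0)
	if err != nil {
		panic(err)
	}
	defer overlay.RemoveTemp(contentDir)

	opts := overlay.Options{
		// Hypothetical graph option; if present, the named mount program
		// is used instead of a native overlay mount.
		GraphOpts: []string{"overlay.mount_program=/usr/bin/fuse-overlayfs"},
		ReadOnly:  false,
		RootUID:   0,
		RootGID:   0,
	}
	// Overlay-mount /src (lowerdir) so it appears writable at /dest in the spec.
	mnt, err := overlay.MountWithOptions(contentDir, "/src", "/dest", &opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(mnt.Type, mnt.Source, mnt.Destination, mnt.Options)
}

When GraphOpts carries no mount program, the native overlay path is taken instead, with userxattr appended to the options for rootless callers, as the hunk below shows.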
+func findMountProgram(graphOptions []string) string { + mountMap := map[string]bool{ + ".mount_program": true, + "overlay.mount_program": true, + "overlay2.mount_program": true, + } + + for _, i := range graphOptions { + s := strings.SplitN(i, "=", 2) + if len(s) != 2 { + continue + } + key := s[0] + val := s[1] + if mountMap[key] { + return val + } + } + + return "" +} + +// mountWithMountProgram mount an overlay at mergeDir using the specified mount program +// and overlay options. +func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error { + cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir) + + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "exec %s", mountProgram) + } + return nil +} + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { mergeDir := filepath.Join(contentDir, "merge") // Create overlay mount options for rw/ro. var overlayOptions string - if readOnly { + if opts.ReadOnly { // Read-only overlay mounts require two lower layer. lowerTwo := filepath.Join(contentDir, "lower") if err := os.Mkdir(lowerTwo, 0755); err != nil { return mount, err } - overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", source, lowerTwo) + overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) } else { // Read-write overlay mounts want a lower, upper and a work layer. 
workDir := filepath.Join(contentDir, "work") upperDir := filepath.Join(contentDir, "upper") + + if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { + workDir = opts.WorkDirOptionFragment + upperDir = opts.UpperDirOptionFragment + } + st, err := os.Stat(source) if err != nil { return mount, err @@ -105,45 +185,24 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin return mount, err } } - - overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir) + overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) } - if unshare.IsRootless() { - mountProgram := "" - - mountMap := map[string]bool{ - ".mount_program": true, - "overlay.mount_program": true, - "overlay2.mount_program": true, - } - - for _, i := range graphOptions { - s := strings.SplitN(i, "=", 2) - if len(s) != 2 { - continue - } - key := s[0] - val := s[1] - if mountMap[key] { - mountProgram = val - break - } + mountProgram := findMountProgram(opts.GraphOpts) + if mountProgram != "" { + if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { + return mount, err } - if mountProgram != "" { - cmd := exec.Command(mountProgram, "-o", overlayOptions, mergeDir) - if err := cmd.Run(); err != nil { - return mount, errors.Wrapf(err, "exec %s", mountProgram) - } + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "bind" + mount.Options = []string{"bind", "slave"} + return mount, nil + } - mount.Source = mergeDir - mount.Destination = dest - mount.Type = "bind" - mount.Options = []string{"bind", "slave"} - return mount, nil - } - /* If a mount_program is not specified, fallback to try mount native overlay. */ + if unshare.IsRootless() { + /* If a mount_program is not specified, fallback to try mounting native overlay. 
*/ overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) } @@ -155,6 +214,11 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin return mount, nil } +// Convert ":" to "\:", the path which will be overlay mounted need to be escaped +func escapeColon(source string) string { + return strings.ReplaceAll(source, ":", "\\:") +} + // RemoveTemp removes temporary mountpoint and all content from its parent // directory func RemoveTemp(contentDir string) error { diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 685d63d31f9..a3851622b12 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -9,12 +9,13 @@ import ( "net" "os" "path/filepath" - "runtime" "strconv" "strings" "unicode" + "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" + internalParse "github.com/containers/buildah/internal/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/common/pkg/parse" "github.com/containers/image/v5/types" @@ -22,9 +23,11 @@ import ( "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/openshift/imagebuilder" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/spf13/pflag" "golang.org/x/term" ) @@ -37,17 +40,20 @@ const ( TypeBind = "bind" // TypeTmpfs is the type for mounting tmpfs TypeTmpfs = "tmpfs" -) - -var ( - errBadMntOption = errors.Errorf("invalid mount option") - errDuplicateDest = errors.Errorf("duplicate mount destination") - optionArgError = errors.Errorf("must provide an argument for option") - noDestError = errors.Errorf("must set volume destination") + // TypeCache is the type for mounting a common persistent cache from host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. 
+ // Lifecycle of following directory will be inherited from how host machine treats temporary directory + BuildahCacheDir = "buildah-cache" ) // CommonBuildOptions parses the build options from the bud cli func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { + return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// CommonBuildOptionsFromFlagSet parses the build options from the bud cli +func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) { var ( memoryLimit int64 memorySwap int64 @@ -55,7 +61,7 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { err error ) - memVal, _ := c.Flags().GetString("memory") + memVal, _ := flags.GetString("memory") if memVal != "" { memoryLimit, err = units.RAMInBytes(memVal) if err != nil { @@ -63,16 +69,25 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { } } - memSwapValue, _ := c.Flags().GetString("memory-swap") + memSwapValue, _ := flags.GetString("memory-swap") if memSwapValue != "" { - memorySwap, err = units.RAMInBytes(memSwapValue) - if err != nil { - return nil, errors.Wrapf(err, "invalid value for memory-swap") + if memSwapValue == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(memSwapValue) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for memory-swap") + } } } - addHost, _ := c.Flags().GetStringSlice("add-host") + noHosts, _ := flags.GetBool("no-hosts") + + addHost, _ := flags.GetStringSlice("add-host") if len(addHost) > 0 { + if noHosts { + return nil, errors.Errorf("--no-hosts and --add-host conflict, can not be used together") + } for _, host := range addHost { if err := validateExtraHost(host); err != nil { return nil, errors.Wrapf(err, "invalid value for add-host") @@ -82,8 +97,8 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { noDNS = false dnsServers := []string{} - if c.Flag("dns").Changed { - dnsServers, _ = c.Flags().GetStringSlice("dns") + if flags.Changed("dns") { + dnsServers, _ = flags.GetStringSlice("dns") for _, server := range dnsServers { if strings.ToLower(server) == "none" { noDNS = true @@ -95,62 +110,63 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { } dnsSearch := []string{} - if c.Flag("dns-search").Changed { - dnsSearch, _ = c.Flags().GetStringSlice("dns-search") + if flags.Changed("dns-search") { + dnsSearch, _ = flags.GetStringSlice("dns-search") if noDNS && len(dnsSearch) > 0 { return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none") } } dnsOptions := []string{} - if c.Flag("dns-option").Changed { - dnsOptions, _ = c.Flags().GetStringSlice("dns-option") + if flags.Changed("dns-option") { + dnsOptions, _ = flags.GetStringSlice("dns-option") if noDNS && len(dnsOptions) > 0 { return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none") } } - if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil { + if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil { return nil, errors.Wrapf(err, "invalid --shm-size") } - volumes, _ := c.Flags().GetStringArray("volume") + volumes, _ := flags.GetStringArray("volume") if err := Volumes(volumes); err != nil { return nil, err } - cpuPeriod, _ := c.Flags().GetUint64("cpu-period") - cpuQuota, _ := c.Flags().GetInt64("cpu-quota") - cpuShares, _ := 
c.Flags().GetUint64("cpu-shares") - httpProxy, _ := c.Flags().GetBool("http-proxy") + cpuPeriod, _ := flags.GetUint64("cpu-period") + cpuQuota, _ := flags.GetInt64("cpu-quota") + cpuShares, _ := flags.GetUint64("cpu-shares") + httpProxy, _ := flags.GetBool("http-proxy") ulimit := []string{} - if c.Flag("ulimit").Changed { - ulimit, _ = c.Flags().GetStringSlice("ulimit") + if flags.Changed("ulimit") { + ulimit, _ = flags.GetStringSlice("ulimit") } - secrets, _ := c.Flags().GetStringArray("secret") - sshsources, _ := c.Flags().GetStringArray("ssh") + secrets, _ := flags.GetStringArray("secret") + sshsources, _ := flags.GetStringArray("ssh") commonOpts := &define.CommonBuildOptions{ AddHost: addHost, CPUPeriod: cpuPeriod, CPUQuota: cpuQuota, - CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(), - CPUSetMems: c.Flag("cpuset-mems").Value.String(), + CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(), + CPUSetMems: findFlagFunc("cpuset-mems").Value.String(), CPUShares: cpuShares, - CgroupParent: c.Flag("cgroup-parent").Value.String(), + CgroupParent: findFlagFunc("cgroup-parent").Value.String(), DNSOptions: dnsOptions, DNSSearch: dnsSearch, DNSServers: dnsServers, HTTPProxy: httpProxy, Memory: memoryLimit, MemorySwap: memorySwap, - ShmSize: c.Flag("shm-size").Value.String(), + NoHosts: noHosts, + ShmSize: findFlagFunc("shm-size").Value.String(), Ulimit: ulimit, Volumes: volumes, Secrets: secrets, SSHSources: sshsources, } - securityOpts, _ := c.Flags().GetStringArray("security-opt") + securityOpts, _ := flags.GetStringArray("security-opt") if err := parseSecurityOpts(securityOpts, commonOpts); err != nil { return nil, err } @@ -199,32 +215,14 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti return nil } +// Split string into slice by colon. Backslash-escaped colon (i.e. 
"\:") will not be regarded as separator +func SplitStringWithColonEscape(str string) []string { + return internalParse.SplitStringWithColonEscape(str) +} + // Volume parses the input of --volume func Volume(volume string) (specs.Mount, error) { - mount := specs.Mount{} - arr := strings.SplitN(volume, ":", 3) - if len(arr) < 2 { - return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume) - } - if err := validateVolumeMountHostDir(arr[0]); err != nil { - return mount, err - } - if err := parse.ValidateVolumeCtrDir(arr[1]); err != nil { - return mount, err - } - mountOptions := "" - if len(arr) > 2 { - mountOptions = arr[2] - if _, err := parse.ValidateVolumeOpts(strings.Split(arr[2], ",")); err != nil { - return mount, err - } - } - mountOpts := strings.Split(mountOptions, ",") - mount.Source = arr[0] - mount.Destination = arr[1] - mount.Type = "rbind" - mount.Options = mountOpts - return mount, nil + return internalParse.Volume(volume) } // Volumes validates the host and container paths passed in to the --volume flag @@ -240,236 +238,11 @@ func Volumes(volumes []string) error { return nil } -func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { - finalVolumeMounts := make(map[string]specs.Mount) - - for _, volume := range volumes { - volumeMount, err := Volume(volume) - if err != nil { - return nil, err - } - if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, volumeMount.Destination) - } - finalVolumeMounts[volumeMount.Destination] = volumeMount - } - return finalVolumeMounts, nil -} - -// GetVolumes gets the volumes from --volume and --mount -func GetVolumes(volumes []string, mounts []string) ([]specs.Mount, error) { - unifiedMounts, err := getMounts(mounts) - if err != nil { - return nil, err - } - volumeMounts, err := getVolumeMounts(volumes) - if err != nil { - return nil, err - } - for dest, mount := range volumeMounts { - if _, ok := unifiedMounts[dest]; ok { - return nil, errors.Wrapf(errDuplicateDest, dest) - } - unifiedMounts[dest] = mount - } - - finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) - for _, mount := range unifiedMounts { - finalMounts = append(finalMounts, mount) - } - return finalMounts, nil -} - -// getMounts takes user-provided input from the --mount flag and creates OCI -// spec mounts. -// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... -// buildah run --mount type=tmpfs,target=/dev/shm ... -func getMounts(mounts []string) (map[string]specs.Mount, error) { - finalMounts := make(map[string]specs.Mount) - - errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=,[src=,]target=[,options]") - - // TODO(vrothberg): the manual parsing can be replaced with a regular expression - // to allow a more robust parsing of the mount format and to give - // precise errors regarding supported format versus supported options. - for _, mount := range mounts { - arr := strings.SplitN(mount, ",", 2) - if len(arr) < 2 { - return nil, errors.Wrapf(errInvalidSyntax, "%q", mount) - } - kv := strings.Split(arr[0], "=") - // TODO: type is not explicitly required in Docker. - // If not specified, it defaults to "volume". 
- if len(kv) != 2 || kv[0] != "type" { - return nil, errors.Wrapf(errInvalidSyntax, "%q", mount) - } - - tokens := strings.Split(arr[1], ",") - switch kv[1] { - case TypeBind: - mount, err := GetBindMount(tokens) - if err != nil { - return nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, mount.Destination) - } - finalMounts[mount.Destination] = mount - case TypeTmpfs: - mount, err := GetTmpfsMount(tokens) - if err != nil { - return nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, errors.Wrapf(errDuplicateDest, mount.Destination) - } - finalMounts[mount.Destination] = mount - default: - return nil, errors.Errorf("invalid filesystem type %q", kv[1]) - } - } - - return finalMounts, nil -} - -// GetBindMount parses a single bind mount entry from the --mount flag. -func GetBindMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeBind, - } - - setSource := false - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "bind-nonrecursive": - newMount.Options = append(newMount.Options, "bind") - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) - case "bind-propagation": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, kv[1]) - case "src", "source": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeHostDir(kv[1]); err != nil { - return newMount, err - } - newMount.Source = kv[1] - setSource = true - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - case "consistency": - // Option for OS X only, has no meaning on other platforms - // and can thus be safely ignored. 
- // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts - default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) - } - } - - if !setDest { - return newMount, noDestError - } - - if !setSource { - newMount.Source = newMount.Destination - } - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, err - } - newMount.Options = opts - - return newMount, nil -} - -// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag -func GetTmpfsMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeTmpfs, - Source: TypeTmpfs, - } - - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "tmpfs-mode": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) - case "tmpfs-size": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) - case "src", "source": - return newMount, errors.Errorf("source is not supported with tmpfs mounts") - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, errors.Wrapf(optionArgError, kv[0]) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - default: - return newMount, errors.Wrapf(errBadMntOption, kv[0]) - } - } - - if !setDest { - return newMount, noDestError - } - - return newMount, nil -} - // ValidateVolumeHostDir validates a volume mount's source directory func ValidateVolumeHostDir(hostDir string) error { return parse.ValidateVolumeHostDir(hostDir) } -// validates the host path of buildah --volume -func validateVolumeMountHostDir(hostDir string) error { - if !filepath.IsAbs(hostDir) { - return errors.Errorf("invalid host path, must be an absolute path %q", hostDir) - } - if _, err := os.Stat(hostDir); err != nil { - return errors.WithStack(err) - } - return nil -} - // ValidateVolumeCtrDir validates a volume mount's destination directory. func ValidateVolumeCtrDir(ctrDir string) error { return parse.ValidateVolumeCtrDir(ctrDir) @@ -508,20 +281,26 @@ func validateIPAddress(val string) (string, error) { // SystemContextFromOptions returns a SystemContext populated with values // per the input parameters provided by the caller for the use in authentication. func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { - certDir, err := c.Flags().GetString("cert-dir") + return SystemContextFromFlagSet(c.Flags(), c.Flag) +} + +// SystemContextFromFlagSet returns a SystemContext populated with values +// per the input parameters provided by the caller for the use in authentication. 
+func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) { + certDir, err := flags.GetString("cert-dir") if err != nil { certDir = "" } ctx := &types.SystemContext{ DockerCertPath: certDir, } - tlsVerify, err := c.Flags().GetBool("tls-verify") - if err == nil && c.Flag("tls-verify").Changed { + tlsVerify, err := flags.GetBool("tls-verify") + if err == nil && findFlagFunc("tls-verify").Changed { ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify) ctx.OCIInsecureSkipTLSVerify = !tlsVerify ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify } - disableCompression, err := c.Flags().GetBool("disable-compression") + disableCompression, err := flags.GetBool("disable-compression") if err == nil { if disableCompression { ctx.OCIAcceptUncompressedLayers = true @@ -529,59 +308,59 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) { ctx.DirForceCompress = true } } - creds, err := c.Flags().GetString("creds") - if err == nil && c.Flag("creds").Changed { + creds, err := flags.GetString("creds") + if err == nil && findFlagFunc("creds").Changed { var err error ctx.DockerAuthConfig, err = AuthConfig(creds) if err != nil { return nil, err } } - sigPolicy, err := c.Flags().GetString("signature-policy") - if err == nil && c.Flag("signature-policy").Changed { + sigPolicy, err := flags.GetString("signature-policy") + if err == nil && findFlagFunc("signature-policy").Changed { ctx.SignaturePolicyPath = sigPolicy } - authfile, err := c.Flags().GetString("authfile") + authfile, err := flags.GetString("authfile") if err == nil { ctx.AuthFilePath = getAuthFile(authfile) } - regConf, err := c.Flags().GetString("registries-conf") - if err == nil && c.Flag("registries-conf").Changed { + regConf, err := flags.GetString("registries-conf") + if err == nil && findFlagFunc("registries-conf").Changed { ctx.SystemRegistriesConfPath = regConf } - regConfDir, err := c.Flags().GetString("registries-conf-dir") - if err == nil && c.Flag("registries-conf-dir").Changed { + regConfDir, err := flags.GetString("registries-conf-dir") + if err == nil && findFlagFunc("registries-conf-dir").Changed { ctx.RegistriesDirPath = regConfDir } - shortNameAliasConf, err := c.Flags().GetString("short-name-alias-conf") - if err == nil && c.Flag("short-name-alias-conf").Changed { + shortNameAliasConf, err := flags.GetString("short-name-alias-conf") + if err == nil && findFlagFunc("short-name-alias-conf").Changed { ctx.UserShortNameAliasConfPath = shortNameAliasConf } ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version) - if c.Flag("os") != nil && c.Flag("os").Changed { + if findFlagFunc("os") != nil && findFlagFunc("os").Changed { var os string - if os, err = c.Flags().GetString("os"); err != nil { + if os, err = flags.GetString("os"); err != nil { return nil, err } ctx.OSChoice = os } - if c.Flag("arch") != nil && c.Flag("arch").Changed { + if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed { var arch string - if arch, err = c.Flags().GetString("arch"); err != nil { + if arch, err = flags.GetString("arch"); err != nil { return nil, err } ctx.ArchitectureChoice = arch } - if c.Flag("variant") != nil && c.Flag("variant").Changed { + if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed { var variant string - if variant, err = c.Flags().GetString("variant"); err != nil { + if variant, err = flags.GetString("variant"); err != nil { return nil, err } ctx.VariantChoice = variant } - if 
c.Flag("platform") != nil && c.Flag("platform").Changed { + if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed { var specs []string - if specs, err = c.Flags().GetStringSlice("platform"); err != nil { + if specs, err = flags.GetStringSlice("platform"); err != nil { return nil, err } if len(specs) == 0 || specs[0] == "" { @@ -669,7 +448,7 @@ const platformSep = "/" // DefaultPlatform returns the standard platform for the current system func DefaultPlatform() string { - return runtime.GOOS + platformSep + runtime.GOARCH + return platforms.DefaultString() } // Platform separates the platform string into os, arch and variant, @@ -731,8 +510,13 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) { // IDMappingOptions parses the build options related to user namespaces and ID mapping. func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { - user := c.Flag("userns-uid-map-user").Value.String() - group := c.Flag("userns-gid-map-group").Value.String() + return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) +} + +// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping. +func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { + user := findFlagFunc("userns-uid-map-user").Value.String() + group := findFlagFunc("userns-gid-map-group").Value.String() // If only the user or group was specified, use the same value for the // other, since we need both in order to initialize the maps using the // names. @@ -751,7 +535,7 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } mappings = submappings } - globalOptions := c.PersistentFlags() + globalOptions := persistentFlags // We'll parse the UID and GID mapping options the same way. buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) { outmap := make([]specs.LinuxIDMapping, 0, len(basemap)) @@ -769,8 +553,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed { spec, _ = globalOptions.GetStringSlice(option) } - if c.Flag(option).Changed { - spec, _ = c.Flags().GetStringSlice(option) + if findFlagFunc(option).Changed { + spec, _ = flags.GetStringSlice(option) } idmap, err := parseIDMap(spec) if err != nil { @@ -811,8 +595,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } // If the user specifically requested that we either use or don't use // user namespaces, override that default. 
- if c.Flag("userns").Changed { - how := c.Flag("userns").Value.String() + if findFlagFunc("userns").Changed { + how := findFlagFunc("userns").Value.String() switch how { case "", "container", "private": usernsOption.Host = false @@ -829,13 +613,6 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio } usernsOptions = define.NamespaceOptions{usernsOption} - usernetwork := c.Flags().Lookup("network") - if usernetwork != nil && !usernetwork.Changed { - usernsOptions = append(usernsOptions, define.NamespaceOption{ - Name: string(specs.NetworkNamespace), - Host: usernsOption.Host, - }) - } // If the user requested that we use the host namespace, but also that // we use mappings, that's not going to work. if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host { @@ -877,23 +654,30 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) { // NamespaceOptions parses the build options for all namespaces except for user namespace. func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { + return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// NamespaceOptionsFromFlagSet parses the build options for all namespaces except for user namespace. +func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) { options := make(define.NamespaceOptions, 0, 7) policy := define.NetworkDefault - for _, what := range []string{string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { - if c.Flags().Lookup(what) != nil && c.Flag(what).Changed { - how := c.Flag(what).Value.String() + for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} { + if flags.Lookup(what) != nil && findFlagFunc(what).Changed { + how := findFlagFunc(what).Value.String() switch what { - case "network": - what = string(specs.NetworkNamespace) + case "cgroupns": + what = string(specs.CgroupNamespace) } switch how { case "", "container", "private": logrus.Debugf("setting %q namespace to %q", what, "") + policy = define.NetworkEnabled options.AddOrReplace(define.NamespaceOption{ Name: what, }) case "host": logrus.Debugf("setting %q namespace to host", what) + policy = define.NetworkEnabled options.AddOrReplace(define.NamespaceOption{ Name: what, Host: true, @@ -910,8 +694,11 @@ func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOption } } how = strings.TrimPrefix(how, "ns:") - if _, err := os.Stat(how); err != nil { - return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go + if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) { + if _, err := os.Stat(how); err != nil { + return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what) + } } policy = define.NetworkEnabled logrus.Debugf("setting %q namespace to %q", what, how) @@ -1034,35 +821,60 @@ func GetTempDir() string { } // Secrets parses the --secret flag -func Secrets(secrets []string) (map[string]string, error) { - parsed := make(map[string]string) - invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar") +func Secrets(secrets []string) (map[string]define.Secret, error) { 
+ invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]") + parsed := make(map[string]define.Secret) for _, secret := range secrets { - split := strings.Split(secret, ",") - if len(split) > 2 { + tokens := strings.Split(secret, ",") + var id, src, typ string + for _, val := range tokens { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "id": + id = kv[1] + case "src": + src = kv[1] + case "env": + src = kv[1] + typ = "env" + case "type": + if kv[1] != "file" && kv[1] != "env" { + return nil, errors.New("invalid secret type, must be file or env") + } + typ = kv[1] + } + } + if id == "" { return nil, invalidSyntax } - if len(split) == 2 { - id := strings.Split(split[0], "=") - src := strings.Split(split[1], "=") - if len(split) == 2 && strings.ToLower(id[0]) == "id" && strings.ToLower(src[0]) == "src" { - fullPath, err := filepath.Abs(src[1]) - if err != nil { - return nil, err - } - _, err = os.Stat(fullPath) - if err == nil { - parsed[id[1]] = fullPath - } - if err != nil { - return nil, errors.Wrap(err, "could not parse secrets") - } + if src == "" { + src = id + } + if typ == "" { + if _, ok := os.LookupEnv(id); ok { + typ = "env" } else { - return nil, invalidSyntax + typ = "file" } - } else { - return nil, invalidSyntax } + + if typ == "file" { + fullPath, err := filepath.Abs(src) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + _, err = os.Stat(fullPath) + if err != nil { + return nil, errors.Wrap(err, "could not parse secrets") + } + src = fullPath + } + newSecret := define.Secret{ + Source: src, + SourceType: typ, + } + parsed[id] = newSecret + } return parsed, nil } @@ -1085,3 +897,20 @@ func SSH(sshSources []string) (map[string]*sshagent.Source, error) { } return parsed, nil } + +func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) { + if path != "" { + excludes, err := imagebuilder.ParseIgnore(path) + return excludes, path, err + } + path = filepath.Join(contextDir, ".containerignore") + excludes, err := imagebuilder.ParseIgnore(path) + if os.IsNotExist(err) { + path = filepath.Join(contextDir, ".dockerignore") + excludes, err = imagebuilder.ParseIgnore(path) + } + if os.IsNotExist(err) { + return excludes, "", nil + } + return excludes, path, err +} diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index d02ea2a4d8d..b985cd2b01a 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -111,8 +111,9 @@ func (a *AgentServer) Serve(processLabel string) (string, error) { a.wg.Done() }() // the only way to get agent.ServeAgent is to close the connection it's serving on + // TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing connection. 
go func() { - time.Sleep(500 * time.Millisecond) + time.Sleep(2000 * time.Millisecond) c.Close() }() } diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go index 7149ac98631..3eddf549345 100644 --- a/vendor/github.com/containers/buildah/pull.go +++ b/vendor/github.com/containers/buildah/pull.go @@ -6,7 +6,6 @@ import ( "time" "github.com/containers/buildah/define" - "github.com/containers/buildah/pkg/blobcache" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/types" @@ -63,15 +62,13 @@ func Pull(ctx context.Context, imageName string, options PullOptions) (imageID s libimageOptions.OciDecryptConfig = options.OciDecryptConfig libimageOptions.AllTags = options.AllTags libimageOptions.RetryDelay = &options.RetryDelay + libimageOptions.DestinationLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) if options.MaxRetries > 0 { retries := uint(options.MaxRetries) libimageOptions.MaxRetries = &retries } - if options.BlobDirectory != "" { - libimageOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal) - } pullPolicy, err := config.ParsePullPolicy(options.PullPolicy.String()) if err != nil { diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go index 78adb69396c..65bda9ca011 100644 --- a/vendor/github.com/containers/buildah/push.go +++ b/vendor/github.com/containers/buildah/push.go @@ -10,6 +10,7 @@ import ( "github.com/containers/common/libimage" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" @@ -20,11 +21,30 @@ import ( "github.com/sirupsen/logrus" ) +// cacheLookupReferenceFunc wraps a BlobCache into a +// libimage.LookupReferenceFunc to allow for using a BlobCache during +// image-copy operations. +func cacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc { + // Using a closure here allows us to reference a BlobCache without + // having to explicitly maintain it in the libimage API. + return func(ref types.ImageReference) (types.ImageReference, error) { + if directory == "" { + return ref, nil + } + ref, err := blobcache.NewBlobCache(ref, directory, compress) + if err != nil { + return nil, errors.Wrapf(err, "error using blobcache %q", directory) + } + return ref, nil + } +} + // PushOptions can be used to alter how an image is copied somewhere. type PushOptions struct { // Compression specifies the type of compression which is applied to // layer blobs. The default is to not use compression, but // archive.Gzip is recommended. + // OBSOLETE: Use CompressionFormat instead. Compression archive.Compression // SignaturePolicyPath specifies an override location for the signature // policy which should be used for verifying the new image as it is @@ -71,6 +91,11 @@ type PushOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. 
OciEncryptLayers *[]int + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int } // Push copies the contents of the image to a new location. @@ -84,19 +109,19 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options libimageOptions.RetryDelay = &options.RetryDelay libimageOptions.OciEncryptConfig = options.OciEncryptConfig libimageOptions.OciEncryptLayers = options.OciEncryptLayers + libimageOptions.CompressionFormat = options.CompressionFormat + libimageOptions.CompressionLevel = options.CompressionLevel libimageOptions.PolicyAllowStorage = true if options.Quiet { libimageOptions.Writer = nil } - if options.BlobDirectory != "" { - compress := types.PreserveOriginal - if options.Compression == archive.Gzip { - compress = types.Compress - } - libimageOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, compress) + compress := types.PreserveOriginal + if options.Compression == archive.Gzip { + compress = types.Compress } + libimageOptions.SourceLookupReferenceFunc = cacheLookupReferenceFunc(options.BlobDirectory, compress) runtime, err := libimage.RuntimeFromStore(options.Store, &libimage.RuntimeOptions{SystemContext: options.SystemContext}) if err != nil { diff --git a/vendor/github.com/containers/buildah/run.go b/vendor/github.com/containers/buildah/run.go index 935630cae34..e56aac8c9dc 100644 --- a/vendor/github.com/containers/buildah/run.go +++ b/vendor/github.com/containers/buildah/run.go @@ -5,7 +5,9 @@ import ( "io" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/image/v5/types" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -83,6 +85,8 @@ type RunOptions struct { Runtime string // Args adds global arguments for the runtime. Args []string + // NoHosts use the images /etc/hosts file + NoHosts bool // NoPivot adds the --no-pivot runtime flag. NoPivot bool // Mounts are additional mount points which we want to provide. @@ -93,6 +97,8 @@ type RunOptions struct { User string // WorkingDir is an override for the working directory. WorkingDir string + // ContextDir is used as the root directory for the source location for mounts that are of type "bind". + ContextDir string // Shell is default shell to run in a container. Shell string // Cmd is an override for the configured default command. @@ -139,20 +145,36 @@ type RunOptions struct { // Devices are the additional devices to add to the containers Devices define.ContainerDevices // Secrets are the available secrets to use in a RUN - Secrets map[string]string + Secrets map[string]define.Secret // SSHSources is the available ssh agents to use in a RUN SSHSources map[string]*sshagent.Source `json:"-"` // RunMounts are mounts for this run. RunMounts for this run // will not show up in subsequent runs. RunMounts []string + // Map of stages and container mountpoint if any from stage executor + StageMountPoints map[string]internal.StageMountDetails + // External Image mounts to be cleaned up. 
+ // Buildah run --mount could mount image before RUN calls, RUN could cleanup + // them up as well + ExternalImageMounts []string + // System context of current build + SystemContext *types.SystemContext + // CgroupManager to use for running OCI containers + CgroupManager string } // RunMountArtifacts are the artifacts created when using a run mount. type runMountArtifacts struct { // RunMountTargets are the run mount targets inside the container RunMountTargets []string + // TmpFiles are artifacts that need to be removed outside the container + TmpFiles []string + // Any external images which were mounted inside container + MountedImages []string // Agents are the ssh agents started Agents []*sshagent.AgentServer // SSHAuthSock is the path to the ssh auth sock inside the container SSHAuthSock string + // LockedTargets to be unlocked if there are any. + LockedTargets []string } diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 113c83ef9a2..43720ee1440 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -1,10 +1,10 @@ +//go:build linux // +build linux package buildah import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -17,27 +17,37 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" - "github.com/containernetworking/cni/libcni" "github.com/containers/buildah/bind" "github.com/containers/buildah/chroot" "github.com/containers/buildah/copier" "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + internalParse "github.com/containers/buildah/internal/parse" + internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/overlay" + "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" + "github.com/containers/common/libnetwork/network" + nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" + "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/chown" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/defaultnet" "github.com/containers/common/pkg/subscriptions" + imagetypes "github.com/containers/image/v5/types" + "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/stringid" "github.com/containers/storage/pkg/unshare" + storagetypes "github.com/containers/storage/types" "github.com/docker/go-units" "github.com/docker/libnetwork/resolvconf" "github.com/docker/libnetwork/types" @@ -152,11 +162,16 @@ func (b *Builder) Run(command []string, options RunOptions) error { setupTerminal(g, options.Terminal, options.TerminalSize) - configureNetwork, configureNetworks, err := b.configureNamespaces(g, options) + configureNetwork, configureNetworks, err := b.configureNamespaces(g, &options) if err != nil { return err } + // rootless and networks are not supported + if len(configureNetworks) > 0 && isolation == IsolationOCIRootless { + return errors.New("cannot use networks as rootless") + } + homeDir, err := b.configureUIDGID(g, mountPoint, options) if err != nil { return err @@ -176,16 +191,19 @@ func (b *Builder) Run(command []string, options RunOptions) error { return err } - // Figure out who owns files that will appear 
to be owned by UID/GID 0 in the container. - rootUID, rootGID, err := util.GetHostRootIDs(spec) - if err != nil { - return err + uid, gid := spec.Process.User.UID, spec.Process.User.GID + if spec.Linux != nil { + uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid) + if err != nil { + return err + } } - rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)} mode := os.FileMode(0755) coptions := copier.MkdirOptions{ - ChownNew: rootIDPair, + ChownNew: idPair, ChmodNew: &mode, } if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil { @@ -196,7 +214,14 @@ func (b *Builder) Run(command []string, options RunOptions) error { namespaceOptions := append(b.NamespaceOptions, options.NamespaceOptions...) volumes := b.Volumes() - if !contains(volumes, "/etc/hosts") { + // Figure out who owns files that will appear to be owned by UID/GID 0 in the container. + rootUID, rootGID, err := util.GetHostRootIDs(spec) + if err != nil { + return err + } + rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} + + if !options.NoHosts && !contains(volumes, "/etc/hosts") { hostFile, err := b.generateHosts(path, spec.Hostname, b.CommonBuildOpts.AddHost, rootIDPair) if err != nil { return err @@ -247,7 +272,7 @@ rootless=%d bindFiles["/run/.containerenv"] = containerenvPath } - runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.SSHSources, options.RunMounts) + runArtifacts, err := b.setupMounts(options.SystemContext, mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize, namespaceOptions, options.Secrets, options.SSHSources, options.RunMounts, options.ContextDir, options.StageMountPoints) if err != nil { return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID) } @@ -256,27 +281,21 @@ rootless=%d spec.Process.Env = append(spec.Process.Env, sshenv) } + // following run was called from `buildah run` + // and some images were mounted for this run + // add them to cleanup artifacts + if len(options.ExternalImageMounts) > 0 { + runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...) 
+ } + defer func() { - if err := cleanupRunMounts(mountPoint, runArtifacts); err != nil { - options.Logger.Errorf("unabe to cleanup run mounts %v", err) + if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil { + options.Logger.Errorf("unable to cleanup run mounts %v", err) } }() defer b.cleanupTempVolumes() - if options.CNIConfigDir == "" { - options.CNIConfigDir = b.CNIConfigDir - if b.CNIConfigDir == "" { - options.CNIConfigDir = define.DefaultCNIConfigDir - } - } - if options.CNIPluginPath == "" { - options.CNIPluginPath = b.CNIPluginPath - if b.CNIPluginPath == "" { - options.CNIPluginPath = define.DefaultCNIPluginPath - } - } - switch isolation { case define.IsolationOCI: var moreCreateArgs []string @@ -413,7 +432,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin return mounts, nil } -func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]string, sshSources map[string]*sshagent.Source, runFileMounts []string) (*runMountArtifacts, error) { +func (b *Builder) setupMounts(context *imagetypes.SystemContext, mountPoint string, spec *specs.Spec, bundlePath string, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string, namespaceOptions define.NamespaceOptions, secrets map[string]define.Secret, sshSources map[string]*sshagent.Source, runFileMounts []string, contextDir string, stageMountPoints map[string]internal.StageMountDetails) (*runMountArtifacts, error) { // Start building a new list of mounts. var mounts []specs.Mount haveMount := func(destination string) bool { @@ -517,12 +536,18 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st return nil, err } + // Get host UID and GID of the container process. + processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID) + if err != nil { + return nil, err + } + // Get the list of subscriptions mounts. subscriptionMounts := subscriptions.MountsWithUIDGID(b.MountLabel, cdir, b.DefaultMountsFilePath, mountPoint, int(rootUID), int(rootGID), unshare.IsRootless(), false) // Get the list of mounts that are just for this Run() call. // TODO: acui: de-spaghettify run mounts - runMounts, mountArtifacts, err := runSetupRunMounts(runFileMounts, secrets, sshSources, b.MountLabel, cdir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, b.ProcessLabel) + runMounts, mountArtifacts, err := b.runSetupRunMounts(context, runFileMounts, secrets, stageMountPoints, sshSources, cdir, contextDir, spec.Linux.UIDMappings, spec.Linux.GIDMappings, int(rootUID), int(rootGID), int(processUID), int(processGID)) if err != nil { return nil, err } @@ -532,11 +557,6 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st if err != nil { return nil, err } - // Get host UID and GID of the container process. - processUID, processGID, err := util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, spec.Process.User.UID, spec.Process.User.GID) - if err != nil { - return nil, err - } // Get the list of explicitly-specified volume mounts. 
volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID), int(processUID), int(processGID)) @@ -544,6 +564,11 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st return nil, err } + // prepare list of mount destinations which can be cleaned up safely. + // we can clean bindFiles, subscriptionMounts and specMounts + // everything other than these might have users content + mountArtifacts.RunMountTargets = append(append(append(mountArtifacts.RunMountTargets, cleanableDestinationListFromMounts(bindFileMounts)...), cleanableDestinationListFromMounts(subscriptionMounts)...), cleanableDestinationListFromMounts(specMounts)...) + allMounts := util.SortMounts(append(append(append(append(append(append(volumes, builtins...), runMounts...), subscriptionMounts...), bindFileMounts...), specMounts...), sysfsMount...)) // Add them all, in the preferred order, except where they conflict with something that was previously added. for _, mount := range allMounts { @@ -560,6 +585,23 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st return mountArtifacts, nil } +// Destinations which can be cleaned up after every RUN +func cleanableDestinationListFromMounts(mounts []spec.Mount) []string { + mountDest := []string{} + for _, mount := range mounts { + // Add all destination to mountArtifacts so that they can be cleaned up later + if mount.Destination != "" { + // we dont want to remove destinations with /etc, /dev, /sys, /proc as rootfs already contains these files + // and unionfs will create a `whiteout` i.e `.wh` files on removal of overlapping files from these directories. + // everything other than these will be cleanedup + if !strings.HasPrefix(mount.Destination, "/etc") && !strings.HasPrefix(mount.Destination, "/dev") && !strings.HasPrefix(mount.Destination, "/sys") && !strings.HasPrefix(mount.Destination, "/proc") { + mountDest = append(mountDest, mount.Destination) + } + } + } + return mountDest +} + // addResolvConf copies files from host and sets them up to bind mount into container func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServers, dnsSearch, dnsOptions []string, namespaceOptions define.NamespaceOptions) (string, error) { resolvConf := "/etc/resolv.conf" @@ -691,11 +733,13 @@ func (b *Builder) generateHosts(rdir, hostname string, addHosts []string, chownO } hosts.Write([]byte(fmt.Sprintf("%s\t%s\n", values[1], values[0]))) } + hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s %s\n", b.Container, hostname))) + hosts.Write([]byte(fmt.Sprintf("::1 %s %s\n", b.Container, hostname))) - if hostname != "" { - hosts.Write([]byte(fmt.Sprintf("127.0.0.1 %s\n", hostname))) - hosts.Write([]byte(fmt.Sprintf("::1 %s\n", hostname))) + if ip := util.LocalIP(); ip != "" { + hosts.Write([]byte(fmt.Sprintf("%s %s\n", ip, "host.containers.internal"))) } + cfile := filepath.Join(rdir, filepath.Base(hostPath)) if err = ioutils.AtomicWriteFile(cfile, hosts.Bytes(), stat.Mode().Perm()); err != nil { return "", errors.Wrapf(err, "error writing /etc/hosts into the container") @@ -736,7 +780,8 @@ func setupTerminal(g *generate.Generator, terminalPolicy TerminalPolicy, termina } } -func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) { +func runUsingRuntime(options RunOptions, configureNetwork bool, 
moreCreateArgs []string, spec *specs.Spec, bundlePath, containerName string, + containerCreateW io.WriteCloser, containerStartR io.ReadCloser) (wstatus unix.WaitStatus, err error) { if options.Logger == nil { options.Logger = logrus.StandardLogger() } @@ -772,11 +817,10 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe runtime := options.Runtime if runtime == "" { runtime = util.Runtime() - - localRuntime := util.FindLocalRuntime(runtime) - if localRuntime != "" { - runtime = localRuntime - } + } + localRuntime := util.FindLocalRuntime(runtime) + if localRuntime != "" { + runtime = localRuntime } // Default to just passing down our stdio. @@ -820,6 +864,9 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe if stdioPipe, err = runMakeStdioPipe(int(uid), int(gid)); err != nil { return 1, err } + if err = runLabelStdioPipes(stdioPipe, spec.Process.SelinuxLabel, spec.Linux.MountLabel); err != nil { + return 1, err + } errorFds = []int{stdioPipe[unix.Stdout][0], stdioPipe[unix.Stderr][0]} closeBeforeReadingErrorFds = []int{stdioPipe[unix.Stdout][1], stdioPipe[unix.Stderr][1]} // Set stdio to our pipes. @@ -839,9 +886,14 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe } } + runtimeArgs := options.Args[:] + if options.CgroupManager == config.SystemdCgroupsManager { + runtimeArgs = append(runtimeArgs, "--systemd-cgroup") + } + // Build the commands that we'll execute. pidFile := filepath.Join(bundlePath, "pid") - args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) + args := append(append(append(runtimeArgs, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs...), containerName) create := exec.Command(runtime, args...) 
create.Dir = bundlePath stdin, stdout, stderr := getCreateStdio() @@ -891,7 +943,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe if err != nil { return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) } - stopped := false + var stopped uint32 var reaping sync.WaitGroup reaping.Add(1) go func() { @@ -902,17 +954,20 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe wstatus = 0 options.Logger.Errorf("error waiting for container child process %d: %v\n", pid, err) } - stopped = true + atomic.StoreUint32(&stopped, 1) }() if configureNetwork { - teardown, err := runConfigureNetwork(isolation, options, configureNetworks, pid, containerName, pargs) - if teardown != nil { - defer teardown() - } - if err != nil { + if _, err := containerCreateW.Write([]byte{1}); err != nil { return 1, err } + containerCreateW.Close() + logrus.Debug("waiting for parent start message") + b := make([]byte, 1) + if _, err := containerStartR.Read(b); err != nil { + return 1, errors.Wrap(err, "did not get container start message from parent") + } + containerStartR.Close() } if copyPipes { @@ -935,7 +990,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe return 1, errors.Wrapf(err, "error from %s starting container", runtime) } defer func() { - if !stopped { + if atomic.LoadUint32(&stopped) == 0 { if err2 := kill.Run(); err2 != nil { options.Logger.Infof("error from %s stopping container: %v", runtime, err2) } @@ -952,7 +1007,7 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe stat.Stderr = os.Stderr stateOutput, err := stat.Output() if err != nil { - if stopped { + if atomic.LoadUint32(&stopped) != 0 { // container exited break } @@ -964,20 +1019,20 @@ func runUsingRuntime(isolation define.Isolation, options RunOptions, configureNe switch state.Status { case "running": case "stopped": - stopped = true + atomic.StoreUint32(&stopped, 1) default: return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status) } - if stopped { + if atomic.LoadUint32(&stopped) != 0 { break } select { case <-finishedCopy: - stopped = true + atomic.StoreUint32(&stopped, 1) case <-time.After(time.Until(now.Add(100 * time.Millisecond))): continue } - if stopped { + if atomic.LoadUint32(&stopped) != 0 { break } } @@ -1066,7 +1121,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) { unix.CloseOnExec(fd) } - cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", fmt.Sprintf("%d", pid), "tap0") + cmd := exec.Command(slirp4netns, "--mtu", "65520", "-r", "3", "-c", strconv.Itoa(pid), "tap0") cmd.Stdin, cmd.Stdout, cmd.Stderr = nil, nil, nil cmd.ExtraFiles = []*os.File{rootlessSlirpSyncW} @@ -1107,76 +1162,17 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) { }, nil } -func runConfigureNetwork(isolation define.Isolation, options RunOptions, configureNetworks []string, pid int, containerName string, command []string) (teardown func(), err error) { - var netconf, undo []*libcni.NetworkConfigList - +func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, configureNetworks []string, containerName string) (teardown func(), err error) { if isolation == IsolationOCIRootless { if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil && !ns.Host && ns.Path == "" { return setupRootlessNetwork(pid) } } - confdir := options.CNIConfigDir - // Create a default configuration 
if one is not present. - // Need to pull containers.conf settings for this one. - containersConf, err := config.Default() - if err != nil { - return nil, errors.Wrapf(err, "failed to get container config") - } - if err := defaultnet.Create(containersConf.Network.DefaultNetwork, containersConf.Network.DefaultSubnet, confdir, confdir, containersConf.Engine.MachineEnabled); err != nil { - logrus.Errorf("Failed to created default CNI network: %v", err) + if len(configureNetworks) == 0 { + configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()} } - // Scan for CNI configuration files. - files, err := libcni.ConfFiles(confdir, []string{".conf"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration files named *.conf in directory %q", confdir) - } - lists, err := libcni.ConfFiles(confdir, []string{".conflist"}) - if err != nil { - return nil, errors.Wrapf(err, "error finding CNI networking configuration list files named *.conflist in directory %q", confdir) - } - logrus.Debugf("CNI network configuration file list: %#v", append(files, lists...)) - // Read the CNI configuration files. - for _, file := range files { - nc, err := libcni.ConfFromFile(file) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration from file %q for %v", file, command) - } - if len(configureNetworks) > 0 && nc.Network != nil && (nc.Network.Name == "" || !util.StringInSlice(nc.Network.Name, configureNetworks)) { - if nc.Network.Name == "" { - logrus.Debugf("configuration in %q has no name, skipping it", file) - } else { - logrus.Debugf("configuration in %q has name %q, skipping it", file, nc.Network.Name) - } - continue - } - if nc.Network == nil { - continue - } - cl, err := libcni.ConfListFromConf(nc) - if err != nil { - return nil, errors.Wrapf(err, "error converting networking configuration from file %q for %v", file, command) - } - logrus.Debugf("using network configuration from %q", file) - netconf = append(netconf, cl) - } - for _, list := range lists { - cl, err := libcni.ConfListFromFile(list) - if err != nil { - return nil, errors.Wrapf(err, "error loading networking configuration list from file %q for %v", list, command) - } - if len(configureNetworks) > 0 && (cl.Name == "" || !util.StringInSlice(cl.Name, configureNetworks)) { - if cl.Name == "" { - logrus.Debugf("configuration list in %q has no name, skipping it", list) - } else { - logrus.Debugf("configuration list in %q has name %q, skipping it", list, cl.Name) - } - continue - } - logrus.Debugf("using network configuration list from %q", list) - netconf = append(netconf, cl) - } // Make sure we can access the container's network namespace, // even after it exits, to successfully tear down the // interfaces. Ensure this by opening a handle to the network @@ -1185,39 +1181,34 @@ func runConfigureNetwork(isolation define.Isolation, options RunOptions, configu netns := fmt.Sprintf("/proc/%d/ns/net", pid) netFD, err := unix.Open(netns, unix.O_RDONLY, 0) if err != nil { - return nil, errors.Wrapf(err, "error opening network namespace for %v", command) + return nil, errors.Wrapf(err, "error opening network namespace") } mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD) - // Build our search path for the plugins. - pluginPaths := strings.Split(options.CNIPluginPath, string(os.PathListSeparator)) - cni := libcni.CNIConfig{Path: pluginPaths} - // Configure the interfaces. 
- rtconf := make(map[*libcni.NetworkConfigList]*libcni.RuntimeConf) - teardown = func() { - for _, nc := range undo { - if err = cni.DelNetworkList(context.Background(), nc, rtconf[nc]); err != nil { - options.Logger.Errorf("error cleaning up network %v for %v: %v", rtconf[nc].IfName, command, err) - } + + networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) + for i, network := range configureNetworks { + networks[network] = nettypes.PerNetworkOptions{ + InterfaceName: fmt.Sprintf("eth%d", i), } - unix.Close(netFD) } - for i, nc := range netconf { - // Build the runtime config for use with this network configuration. - rtconf[nc] = &libcni.RuntimeConf{ - ContainerID: containerName, - NetNS: mynetns, - IfName: fmt.Sprintf("if%d", i), - Args: [][2]string{}, - CapabilityArgs: map[string]interface{}{}, - } - // Bring it up. - _, err := cni.AddNetworkList(context.Background(), nc, rtconf[nc]) + + opts := nettypes.NetworkOptions{ + ContainerID: containerName, + ContainerName: containerName, + Networks: networks, + } + _, err = b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts}) + if err != nil { + return nil, err + } + + teardown = func() { + err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts}) if err != nil { - return teardown, errors.Wrapf(err, "error configuring network list %v for %v", rtconf[nc].IfName, command) + options.Logger.Errorf("failed to cleanup network: %v", err) } - // Add it to the list of networks to take down when the container process exits. - undo = append([]*libcni.NetworkConfigList{nc}, undo...) } + return teardown, nil } @@ -1575,8 +1566,24 @@ func runUsingRuntimeMain() { os.Exit(1) } + // open the pipes used to communicate with the parent process + var containerCreateW *os.File + var containerStartR *os.File + if options.ConfigureNetwork { + containerCreateW = os.NewFile(4, "containercreatepipe") + if containerCreateW == nil { + fmt.Fprintf(os.Stderr, "could not open fd 4\n") + os.Exit(1) + } + containerStartR = os.NewFile(5, "containerstartpipe") + if containerStartR == nil { + fmt.Fprintf(os.Stderr, "could not open fd 5\n") + os.Exit(1) + } + } + // Run the container, start to finish. - status, err := runUsingRuntime(options.Isolation, options.Options, options.ConfigureNetwork, options.ConfigureNetworks, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName) + status, err := runUsingRuntime(options.Options, options.ConfigureNetwork, options.MoreCreateArgs, ospec, options.BundlePath, options.ContainerName, containerCreateW, containerStartR) if err != nil { fmt.Fprintf(os.Stderr, "error running container: %v\n", err) os.Exit(1) @@ -1690,7 +1697,7 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti return configureNetwork, configureNetworks, configureUTS, nil } -func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) (bool, []string, error) { +func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, []string, error) { defaultNamespaceOptions, err := DefaultNamespaceOptions() if err != nil { return false, nil, err @@ -1701,8 +1708,17 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options RunOptions) namespaceOptions.AddOrReplace(options.NamespaceOptions...) 
networkPolicy := options.ConfigureNetwork + //Nothing was specified explicitly so network policy should be inherited from builder if networkPolicy == NetworkDefault { networkPolicy = b.ConfigureNetwork + + // If builder policy was NetworkDisabled and + // we want to disable network for this run. + // reset options.ConfigureNetwork to NetworkDisabled + // since it will be treated as source of truth later. + if networkPolicy == NetworkDisabled { + options.ConfigureNetwork = networkPolicy + } } configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy) @@ -1793,7 +1809,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { var foundrw, foundro, foundz, foundZ, foundO, foundU bool - var rootProp string + var rootProp, upperDir, workDir string for _, opt := range options { switch opt { case "rw": @@ -1811,6 +1827,19 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, case "private", "rprivate", "slave", "rslave", "shared", "rshared": rootProp = opt } + + if strings.HasPrefix(opt, "upperdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + upperDir = splitOpt[1] + } + } + if strings.HasPrefix(opt, "workdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + workDir = splitOpt[1] + } + } } if !foundrw && !foundro { options = append(options, "rw") @@ -1831,6 +1860,10 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, } } if foundO { + if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") { + return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa") + } + containerDir, err := b.store.ContainerDirectory(b.ContainerID) if err != nil { return specs.Mount{}, err @@ -1841,7 +1874,14 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir) } - overlayMount, err := overlay.Mount(contentDir, host, container, rootUID, rootGID, b.store.GraphOptions()) + overlayOpts := overlay.Options{RootUID: rootUID, + RootGID: rootGID, + UpperDirOptionFragment: upperDir, + WorkDirOptionFragment: workDir, + GraphOpts: b.store.GraphOptions(), + } + + overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) if err == nil { b.TempVolumes[contentDir] = true } @@ -1882,7 +1922,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, // Bind mount volumes given by the user when the container was created for _, i := range volumeMounts { var options []string - spliti := strings.Split(i, ":") + spliti := parse.SplitStringWithColonEscape(i) if len(spliti) > 2 { options = strings.Split(spliti[2], ",") } @@ -1935,9 +1975,6 @@ func setupCapAdd(g *generate.Generator, caps ...string) error { if err := g.AddProcessCapabilityEffective(cap); err != nil { return errors.Wrapf(err, "error adding %q to the effective capability set", cap) } - if err := g.AddProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error adding %q to the inheritable capability set", cap) - } if err := g.AddProcessCapabilityPermitted(cap); err != nil { return errors.Wrapf(err, "error adding %q to the permitted capability set", cap) } @@ -1956,9 +1993,6 @@ func setupCapDrop(g 
*generate.Generator, caps ...string) error { if err := g.DropProcessCapabilityEffective(cap); err != nil { return errors.Wrapf(err, "error removing %q from the effective capability set", cap) } - if err := g.DropProcessCapabilityInheritable(cap); err != nil { - return errors.Wrapf(err, "error removing %q from the inheritable capability set", cap) - } if err := g.DropProcessCapabilityPermitted(cap); err != nil { return errors.Wrapf(err, "error removing %q from the permitted capability set", cap) } @@ -2079,18 +2113,8 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions g.ClearProcessEnv() if b.CommonBuildOpts.HTTPProxy { - for _, envSpec := range []string{ - "http_proxy", - "HTTP_PROXY", - "https_proxy", - "HTTPS_PROXY", - "ftp_proxy", - "FTP_PROXY", - "no_proxy", - "NO_PROXY", - } { - envVal := os.Getenv(envSpec) - if envVal != "" { + for _, envSpec := range config.ProxyEnv { + if envVal, ok := os.LookupEnv(envSpec); ok { g.AddProcessEnv(envSpec, envVal) } } @@ -2105,14 +2129,18 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions } func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string) error { - spec.Process.User.AdditionalGids = nil - spec.Linux.Resources = nil - emptyDir := filepath.Join(bundleDir, "empty") if err := os.Mkdir(emptyDir, 0); err != nil { return err } + // If the container has a network namespace, we can create a fresh /sys mount + for _, ns := range spec.Linux.Namespaces { + if ns.Type == specs.NetworkNamespace { + return nil + } + } + // Replace /sys with a read-only bind mount. mounts := []specs.Mount{ { @@ -2152,9 +2180,33 @@ func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string Options: []string{bind.NoBindOption, "rbind", "private", "nodev", "noexec", "nosuid", "ro"}, }, } - // Cover up /sys/fs/cgroup, if it exist in our source for /sys. - if _, err := os.Stat("/sys/fs/cgroup"); err == nil { - spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") + + cgroup2, err := cgroups.IsCgroup2UnifiedMode() + if err != nil { + return err + } + if cgroup2 { + hasCgroupNs := false + for _, ns := range spec.Linux.Namespaces { + if ns.Type == specs.CgroupNamespace { + hasCgroupNs = true + break + } + } + if hasCgroupNs { + mounts = append(mounts, specs.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"private", "rw"}, + }) + } + } else { + spec.Linux.Resources = nil + // Cover up /sys/fs/cgroup, if it exist in our source for /sys. + if _, err := os.Stat("/sys/fs/cgroup"); err == nil { + spec.Linux.MaskedPaths = append(spec.Linux.MaskedPaths, "/sys/fs/cgroup") + } } // Keep anything that isn't under /dev, /proc, or /sys. 
for i := range spec.Mounts { @@ -2172,15 +2224,14 @@ func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, shmSize string func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options RunOptions, configureNetwork bool, configureNetworks, moreCreateArgs []string, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) { var confwg sync.WaitGroup config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{ - Options: options, - Spec: spec, - RootPath: rootPath, - BundlePath: bundlePath, - ConfigureNetwork: configureNetwork, - ConfigureNetworks: configureNetworks, - MoreCreateArgs: moreCreateArgs, - ContainerName: containerName, - Isolation: isolation, + Options: options, + Spec: spec, + RootPath: rootPath, + BundlePath: bundlePath, + ConfigureNetwork: configureNetwork, + MoreCreateArgs: moreCreateArgs, + ContainerName: containerName, + Isolation: isolation, }) if conferr != nil { return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand) @@ -2212,12 +2263,69 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run } confwg.Done() }() + + // create network configuration pipes + var containerCreateR, containerCreateW *os.File + var containerStartR, containerStartW *os.File + if configureNetwork { + containerCreateR, containerCreateW, err = os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating container create pipe") + } + defer containerCreateR.Close() + defer containerCreateW.Close() + + containerStartR, containerStartW, err = os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating container create pipe") + } + defer containerStartR.Close() + defer containerStartW.Close() + cmd.ExtraFiles = []*os.File{containerCreateW, containerStartR} + } + cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...) 
defer preader.Close() defer pwriter.Close() - err = cmd.Run() - if err != nil { - err = errors.Wrapf(err, "error while running runtime") + if err := cmd.Start(); err != nil { + return errors.Wrapf(err, "error while starting runtime") + } + + if configureNetwork { + if err := waitForSync(containerCreateR); err != nil { + // we do not want to return here since we want to capture the exit code from the child via cmd.Wait() + // close the pipes here so that the child will not hang forever + containerCreateR.Close() + containerStartW.Close() + logrus.Errorf("did not get container create message from subprocess: %v", err) + } else { + pidFile := filepath.Join(bundlePath, "pid") + pidValue, err := ioutil.ReadFile(pidFile) + if err != nil { + return err + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue))) + if err != nil { + return errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue)) + } + + teardown, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName) + if teardown != nil { + defer teardown() + } + if err != nil { + return err + } + + logrus.Debug("network namespace successfully setup, send start message to child") + _, err = containerStartW.Write([]byte{1}) + if err != nil { + return err + } + } + } + if err := cmd.Wait(); err != nil { + return errors.Wrapf(err, "error while running runtime") } confwg.Wait() if err == nil { @@ -2229,6 +2337,16 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run return err } +// waitForSync waits for a maximum of 4 minutes to read something from the file +func waitForSync(pipeR *os.File) error { + if err := pipeR.SetDeadline(time.Now().Add(4 * time.Minute)); err != nil { + return err + } + b := make([]byte, 16) + _, err := pipeR.Read(b) + return err +} + func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOptions) error { switch isolation { case IsolationOCIRootless: @@ -2242,8 +2360,7 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil { hostNetworking = ns.Host networkNamespacePath = ns.Path - if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) { - logrus.Debugf("Disabling network namespace configuration.") + if hostNetworking { networkNamespacePath = "" } } @@ -2273,14 +2390,18 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp // DefaultNamespaceOptions returns the default namespace settings from the // runtime-tools generator library. 
func DefaultNamespaceOptions() (define.NamespaceOptions, error) { + cfg, err := config.Default() + if err != nil { + return nil, errors.Wrapf(err, "failed to get container config") + } options := define.NamespaceOptions{ - {Name: string(specs.CgroupNamespace), Host: true}, - {Name: string(specs.IPCNamespace), Host: true}, + {Name: string(specs.CgroupNamespace), Host: cfg.CgroupNS() == "host"}, + {Name: string(specs.IPCNamespace), Host: cfg.IPCNS() == "host"}, {Name: string(specs.MountNamespace), Host: true}, - {Name: string(specs.NetworkNamespace), Host: true}, - {Name: string(specs.PIDNamespace), Host: true}, + {Name: string(specs.NetworkNamespace), Host: cfg.NetNS() == "host" || cfg.NetNS() == "container"}, + {Name: string(specs.PIDNamespace), Host: cfg.PidNS() == "host"}, {Name: string(specs.UserNamespace), Host: true}, - {Name: string(specs.UTSNamespace), Host: true}, + {Name: string(specs.UTSNamespace), Host: cfg.UTSNS() == "host"}, } g, err := generate.New("linux") if err != nil { @@ -2308,15 +2429,14 @@ func contains(volumes []string, v string) bool { } type runUsingRuntimeSubprocOptions struct { - Options RunOptions - Spec *specs.Spec - RootPath string - BundlePath string - ConfigureNetwork bool - ConfigureNetworks []string - MoreCreateArgs []string - ContainerName string - Isolation define.Isolation + Options RunOptions + Spec *specs.Spec + RootPath string + BundlePath string + ConfigureNetwork bool + MoreCreateArgs []string + ContainerName string + Isolation define.Isolation } func init() { @@ -2324,13 +2444,16 @@ func init() { } // runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs -func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources map[string]*sshagent.Source, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) ([]spec.Mount, *runMountArtifacts, error) { +func (b *Builder) runSetupRunMounts(context *imagetypes.SystemContext, mounts []string, secrets map[string]define.Secret, stageMountPoints map[string]internal.StageMountDetails, sshSources map[string]*sshagent.Source, containerWorkingDir string, contextDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, rootUID int, rootGID int, processUID int, processGID int) ([]spec.Mount, *runMountArtifacts, error) { mountTargets := make([]string, 0, 10) + tmpFiles := make([]string, 0, len(mounts)) + mountImages := make([]string, 0, 10) finalMounts := make([]specs.Mount, 0, len(mounts)) agents := make([]*sshagent.AgentServer, 0, len(mounts)) sshCount := 0 defaultSSHSock := "" tokens := []string{} + lockedTargets := []string{} for _, mount := range mounts { arr := strings.SplitN(mount, ",", 2) @@ -2344,17 +2467,19 @@ func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources ma // For now, we only support type secret. 
switch kv[1] { case "secret": - mount, err := getSecretMount(tokens, secrets, mountlabel, containerWorkingDir, uidmap, gidmap) + mount, envFile, err := getSecretMount(tokens, secrets, b.MountLabel, containerWorkingDir, uidmap, gidmap) if err != nil { return nil, nil, err } if mount != nil { finalMounts = append(finalMounts, *mount) mountTargets = append(mountTargets, mount.Destination) - + if envFile != "" { + tmpFiles = append(tmpFiles, envFile) + } } case "ssh": - mount, agent, err := getSSHMount(tokens, sshCount, sshSources, mountlabel, uidmap, gidmap, processLabel) + mount, agent, err := b.getSSHMount(tokens, sshCount, sshSources, b.MountLabel, uidmap, gidmap, b.ProcessLabel) if err != nil { return nil, nil, err } @@ -2368,28 +2493,102 @@ func runSetupRunMounts(mounts []string, secrets map[string]string, sshSources ma // Count is needed as the default destination of the ssh sock inside the container is /run/buildkit/ssh_agent.{i} sshCount++ } + case "bind": + mount, image, err := b.getBindMount(context, tokens, contextDir, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + // only perform cleanup if image was mounted ignore everything else + if image != "" { + mountImages = append(mountImages, image) + } + case "tmpfs": + mount, err := b.getTmpfsMount(tokens, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + case "cache": + mount, lockedPaths, err := b.getCacheMount(tokens, rootUID, rootGID, processUID, processGID, stageMountPoints) + if err != nil { + return nil, nil, err + } + finalMounts = append(finalMounts, *mount) + mountTargets = append(mountTargets, mount.Destination) + lockedTargets = lockedPaths default: return nil, nil, errors.Errorf("invalid mount type %q", kv[1]) } } artifacts := &runMountArtifacts{ RunMountTargets: mountTargets, + TmpFiles: tmpFiles, Agents: agents, + MountedImages: mountImages, SSHAuthSock: defaultSSHSock, + LockedTargets: lockedTargets, } return finalMounts, artifacts, nil } -func getSecretMount(tokens []string, secrets map[string]string, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, error) { +func (b *Builder) getBindMount(context *imagetypes.SystemContext, tokens []string, contextDir string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, string, error) { + if contextDir == "" { + return nil, "", errors.New("Context Directory for current run invocation is not configured") + } + var optionMounts []specs.Mount + mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, image, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, image, err + } + return &volumes[0], image, nil +} + +func (b *Builder) getTmpfsMount(tokens []string, rootUID, rootGID, processUID, processGID int) (*spec.Mount, error) { + var optionMounts []specs.Mount + mount, err := internalParse.GetTmpfsMount(tokens) + if err != nil { + return nil, err + } + optionMounts = append(optionMounts, mount) + volumes, err := 
b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, err + } + return &volumes[0], nil +} + +func (b *Builder) getCacheMount(tokens []string, rootUID, rootGID, processUID, processGID int, stageMountPoints map[string]internal.StageMountDetails) (*spec.Mount, []string, error) { + var optionMounts []specs.Mount + mount, lockedTargets, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints) + if err != nil { + return nil, lockedTargets, err + } + optionMounts = append(optionMounts, mount) + volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, rootUID, rootGID, processUID, processGID) + if err != nil { + return nil, lockedTargets, err + } + return &volumes[0], lockedTargets, nil +} + +func getSecretMount(tokens []string, secrets map[string]define.Secret, mountlabel string, containerWorkingDir string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping) (*spec.Mount, string, error) { errInvalidSyntax := errors.New("secret should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") if len(tokens) == 0 { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } var err error var id, target string var required bool var uid, gid uint32 - var mode uint32 = 400 + var mode uint32 = 0400 for _, val := range tokens { kv := strings.SplitN(val, "=", 2) switch kv[0] { @@ -2400,76 +2599,94 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin case "required": required, err = strconv.ParseBool(kv[1]) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } case "mode": mode64, err := strconv.ParseUint(kv[1], 8, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } mode = uint32(mode64) case "uid": uid64, err := strconv.ParseUint(kv[1], 10, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } uid = uint32(uid64) case "gid": gid64, err := strconv.ParseUint(kv[1], 10, 32) if err != nil { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } gid = uint32(gid64) default: - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } } if id == "" { - return nil, errInvalidSyntax + return nil, "", errInvalidSyntax } // Default location for secretis is /run/secrets/id if target == "" { target = "/run/secrets/" + id } - src, ok := secrets[id] + secr, ok := secrets[id] if !ok { if required { - return nil, errors.Errorf("secret required but no secret with id %s found", id) + return nil, "", errors.Errorf("secret required but no secret with id %s found", id) } - return nil, nil + return nil, "", nil } + var data []byte + var envFile string + var ctrFileOnHost string - // Copy secrets to container working dir, since we need to chmod, chown and relabel it - // for the container user and we don't want to mess with the original file - ctrFileOnHost := filepath.Join(containerWorkingDir, "secrets", id) - _, err = os.Stat(ctrFileOnHost) - if os.IsNotExist(err) { - data, err := ioutil.ReadFile(src) + switch secr.SourceType { + case "env": + data = []byte(os.Getenv(secr.Source)) + tmpFile, err := ioutil.TempFile("/dev/shm", "buildah*") if err != nil { - return nil, err + return nil, "", err } - if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0644); err != nil { - return nil, err + envFile = tmpFile.Name() + ctrFileOnHost = tmpFile.Name() + case "file": + data, err = ioutil.ReadFile(secr.Source) + if err != nil { + return 
nil, "", err } - if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { - return nil, err + ctrFileOnHost = filepath.Join(containerWorkingDir, "secrets", id) + _, err = os.Stat(ctrFileOnHost) + if !os.IsNotExist(err) { + return nil, "", err } + default: + return nil, "", errors.New("invalid source secret type") + } + + // Copy secrets to container working dir (or tmp dir if it's an env), since we need to chmod, + // chown and relabel it for the container user and we don't want to mess with the original file + if err := os.MkdirAll(filepath.Dir(ctrFileOnHost), 0755); err != nil { + return nil, "", err + } + if err := ioutil.WriteFile(ctrFileOnHost, data, 0644); err != nil { + return nil, "", err } if err := label.Relabel(ctrFileOnHost, mountlabel, false); err != nil { - return nil, err + return nil, "", err } hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) if err != nil { - return nil, err + return nil, "", err } if err := os.Lchown(ctrFileOnHost, int(hostUID), int(hostGID)); err != nil { - return nil, err + return nil, "", err } if err := os.Chmod(ctrFileOnHost, os.FileMode(mode)); err != nil { - return nil, err + return nil, "", err } newMount := specs.Mount{ Destination: target, @@ -2477,11 +2694,11 @@ func getSecretMount(tokens []string, secrets map[string]string, mountlabel strin Source: ctrFileOnHost, Options: []string{"bind", "rprivate", "ro"}, } - return &newMount, nil + return &newMount, envFile, nil } // getSSHMount parses the --mount type=ssh flag in the Containerfile, checks if there's an ssh source provided, and creates and starts an ssh-agent to be forwarded into the container -func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) { +func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Source, mountlabel string, uidmap []spec.LinuxIDMapping, gidmap []spec.LinuxIDMapping, processLabel string) (*spec.Mount, *sshagent.AgentServer, error) { errInvalidSyntax := errors.New("ssh should have syntax id=id[,target=path,required=bool,mode=uint,uid=uint,gid=uint") var err error @@ -2524,7 +2741,6 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou gid = uint32(gid64) default: return nil, nil, errInvalidSyntax - } } @@ -2556,13 +2772,13 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou if err := label.Relabel(filepath.Dir(hostSock), mountlabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := label.Relabel(hostSock, mountlabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } @@ -2570,19 +2786,19 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou hostUID, hostGID, err := util.GetHostIDs(uidmap, gidmap, uid, gid) if err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := os.Lchown(hostSock, int(hostUID), 
int(hostGID)); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } if err := os.Chmod(hostSock, os.FileMode(mode)); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { - logrus.Errorf("error shutting down agent: %v", shutdownErr) + b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } @@ -2596,7 +2812,7 @@ func getSSHMount(tokens []string, count int, sshsources map[string]*sshagent.Sou } // cleanupRunMounts cleans up run mounts so they only appear in this run. -func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { +func (b *Builder) cleanupRunMounts(context *imagetypes.SystemContext, mountpoint string, artifacts *runMountArtifacts) error { for _, agent := range artifacts.Agents { err := agent.Shutdown() if err != nil { @@ -2604,6 +2820,25 @@ func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { } } + //cleanup any mounted images for this run + for _, image := range artifacts.MountedImages { + if image != "" { + // if flow hits here some image was mounted for this run + i, err := internalUtil.LookupImage(context, b.store, image) + if err == nil { + // silently try to unmount and do nothing + // if image is being used by something else + _ = i.Unmount(false) + } + if errors.Cause(err) == storagetypes.ErrImageUnknown { + // Ignore only if ErrImageUnknown + // Reason: Image is already unmounted do nothing + continue + } + return err + } + } + opts := copier.RemoveOptions{ All: true, } @@ -2613,5 +2848,64 @@ func cleanupRunMounts(mountpoint string, artifacts *runMountArtifacts) error { return err } } - return nil + var prevErr error + for _, path := range artifacts.TmpFiles { + err := os.Remove(path) + if !os.IsNotExist(err) { + if prevErr != nil { + logrus.Error(prevErr) + } + prevErr = err + } + } + // unlock if any locked files from this RUN statement + for _, path := range artifacts.LockedTargets { + _, err := os.Stat(path) + if err != nil { + // Lockfile not found this might be a problem, + // since LockedTargets must contain list of all locked files + // don't break here since we need to unlock other files but + // log so user can take a look + logrus.Warnf("Lockfile %q was expected here, stat failed with %v", path, err) + continue + } + lockfile, err := lockfile.GetLockfile(path) + if err != nil { + // unable to get lockfile + // lets log error and continue + // unlocking other files + logrus.Warn(err) + continue + } + if lockfile.Locked() { + lockfile.Unlock() + } else { + logrus.Warnf("Lockfile %q was expected to be locked, this is unexpected", path) + continue + } + } + return prevErr +} + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + conf, err := config.Default() + if err != nil { + return nil, err + } + // copy the config to not modify the default by accident + newconf := *conf + if len(cniConfDir) > 0 { + newconf.Network.NetworkConfigDir = cniConfDir + } + if len(cniPluginPath) > 0 { + plugins := strings.Split(cniPluginPath, string(os.PathListSeparator)) + newconf.Network.CNIPluginDirs = plugins + } + + _, netInt, err := network.NetworkBackend(store, &newconf, false) + if err != nil { + return nil, err + } + return netInt, nil } diff --git 
a/vendor/github.com/containers/buildah/run_unix.go b/vendor/github.com/containers/buildah/run_unix.go index 86f7c748292..9e62691e854 100644 --- a/vendor/github.com/containers/buildah/run_unix.go +++ b/vendor/github.com/containers/buildah/run_unix.go @@ -4,6 +4,8 @@ package buildah import ( "github.com/containers/buildah/define" + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" "github.com/pkg/errors" ) @@ -22,3 +24,8 @@ func (b *Builder) Run(command []string, options RunOptions) error { func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/run_unsupported.go b/vendor/github.com/containers/buildah/run_unsupported.go index 2a9029e5e1c..f0640ffe2ab 100644 --- a/vendor/github.com/containers/buildah/run_unsupported.go +++ b/vendor/github.com/containers/buildah/run_unsupported.go @@ -3,6 +3,8 @@ package buildah import ( + nettypes "github.com/containers/common/libnetwork/types" + "github.com/containers/storage" "github.com/pkg/errors" ) @@ -18,3 +20,8 @@ func (b *Builder) Run(command []string, options RunOptions) error { func DefaultNamespaceOptions() (NamespaceOptions, error) { return NamespaceOptions{}, errors.New("function not supported on non-linux systems") } + +// getNetworkInterface creates the network interface +func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) { + return nil, errors.New("function not supported on non-linux systems") +} diff --git a/vendor/github.com/containers/buildah/selinux.go b/vendor/github.com/containers/buildah/selinux.go index 00903203e0a..e7e9fd8c27e 100644 --- a/vendor/github.com/containers/buildah/selinux.go +++ b/vendor/github.com/containers/buildah/selinux.go @@ -3,8 +3,12 @@ package buildah import ( + "fmt" + "github.com/opencontainers/runtime-tools/generate" selinux "github.com/opencontainers/selinux/go-selinux" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) func selinuxGetEnabled() bool { @@ -17,3 +21,21 @@ func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { g.SetLinuxMountLabel(mountLabel) } } + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + if !selinuxGetEnabled() || processLabel == "" || mountLabel == "" { + // SELinux is completely disabled, or we're not doing anything at all with labeling + return nil + } + pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file") + if err != nil { + return errors.Wrapf(err, "computing file creation context for pipes") + } + for i := range stdioPipe { + pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0]) + if err := label.Relabel(pipeFdName, pipeContext, false); err != nil { + return errors.Wrapf(err, "setting file label on %q", pipeFdName) + } + } + return nil +} diff --git a/vendor/github.com/containers/buildah/selinux_unsupported.go b/vendor/github.com/containers/buildah/selinux_unsupported.go index 2646148376b..5b1948315a4 100644 --- a/vendor/github.com/containers/buildah/selinux_unsupported.go +++ b/vendor/github.com/containers/buildah/selinux_unsupported.go @@ -12,3 +12,7 @@ func 
selinuxGetEnabled() bool { func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { } + +func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error { + return nil +} diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go index 47c9ac5cd96..9bfa9d268ea 100644 --- a/vendor/github.com/containers/buildah/util.go +++ b/vendor/github.com/containers/buildah/util.go @@ -123,8 +123,8 @@ func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) { // isReferenceSomething checks if the registry part of a reference is insecure or blocked func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, what func(string, *types.SystemContext) (bool, error)) (bool, error) { - if ref != nil && ref.DockerReference() != nil { - if named, ok := ref.DockerReference().(reference.Named); ok { + if ref != nil { + if named := ref.DockerReference(); named != nil { if domain := reference.Domain(named); domain != "" { return what(domain, sc) } diff --git a/vendor/github.com/containers/buildah/util/types.go b/vendor/github.com/containers/buildah/util/types.go index 3a23fd69038..12546dbd5ce 100644 --- a/vendor/github.com/containers/buildah/util/types.go +++ b/vendor/github.com/containers/buildah/util/types.go @@ -7,10 +7,6 @@ import ( const ( // DefaultRuntime if containers.conf fails. DefaultRuntime = define.DefaultRuntime - // DefaultCNIPluginPath is the default location of CNI plugin helpers. - DefaultCNIPluginPath = define.DefaultCNIPluginPath - // DefaultCNIConfigDir is the default location of CNI configuration files. - DefaultCNIConfigDir = define.DefaultCNIConfigDir ) var ( diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index c6f022d4ed7..13c602c00de 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -3,6 +3,7 @@ package util import ( "fmt" "io" + "net" "net/url" "os" "path/filepath" @@ -16,7 +17,6 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" - "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" @@ -47,9 +47,8 @@ var ( // resolveName checks if name is a valid image name, and if that name doesn't // include a domain portion, returns a list of the names which it might -// correspond to in the set of configured registries, the transport used to -// pull the image, and a boolean which is true iff -// 1) the list of search registries was used, and 2) it was empty. +// correspond to in the set of configured registries, and the transport used to +// pull the image. // // The returned image names never include a transport: prefix, and if transport != "", // (transport, image) should be a valid input to alltransports.ParseImageName. 
@@ -58,9 +57,9 @@ var ( // // NOTE: The "list of search registries is empty" check does not count blocked registries, // and neither the implied "localhost" nor a possible firstRegistry are counted -func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) { +func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, error) { if name == "" { - return nil, "", false, nil + return nil, "", nil } // Maybe it's a truncated image ID. Don't prepend a registry name, then. @@ -68,7 +67,7 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) { // It's a truncated version of the ID of an image that's present in local storage; // we need only expand the ID. - return []string{img.ID}, "", false, nil + return []string{img.ID}, "", nil } } // If we're referring to an image by digest, it *must* be local and we @@ -76,51 +75,32 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s if strings.HasPrefix(name, "sha256:") { d, err := digest.Parse(name) if err != nil { - return nil, "", false, err + return nil, "", err } img, err := store.Image(d.Encoded()) if err != nil { - return nil, "", false, err + return nil, "", err } - return []string{img.ID}, "", false, nil + return []string{img.ID}, "", nil } // Transports are not supported for local image look ups. srcRef, err := alltransports.ParseImageName(name) if err == nil { - return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), false, nil + return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), nil } - // Figure out the list of registries. - var registries []string - searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc) - if err != nil { - logrus.Debugf("unable to read configured registries to complete %q: %v", name, err) - searchRegistries = nil - } - for _, registry := range searchRegistries { - reg, err := sysregistriesv2.FindRegistry(sc, registry) - if err != nil { - logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err) - continue - } - if reg == nil || !reg.Blocked { - registries = append(registries, registry) - } - } - searchRegistriesAreEmpty := len(registries) == 0 - var candidates []string // Local short-name resolution. 
namedCandidates, err := shortnames.ResolveLocally(sc, name) if err != nil { - return nil, "", false, err + return nil, "", err } for _, named := range namedCandidates { candidates = append(candidates, named.String()) } - return candidates, DefaultTransport, searchRegistriesAreEmpty, nil + return candidates, DefaultTransport, nil } // ExpandNames takes unqualified names, parses them as image names, and returns @@ -131,7 +111,7 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora expanded := make([]string, 0, len(names)) for _, n := range names { var name reference.Named - nameList, _, _, err := resolveName(n, systemContext, store) + nameList, _, err := resolveName(n, systemContext, store) if err != nil { return nil, errors.Wrapf(err, "error parsing name %q", n) } @@ -182,7 +162,7 @@ func ResolveNameToReferences( systemContext *types.SystemContext, image string, ) (refs []types.ImageReference, err error) { - names, transport, _, err := resolveName(image, systemContext, store) + names, transport, err := resolveName(image, systemContext, store) if err != nil { return nil, errors.Wrapf(err, "error parsing name %q", image) } @@ -486,3 +466,20 @@ func VerifyTagName(imageSpec string) (types.ImageReference, error) { } return ref, nil } + +// LocalIP returns the non loopback local IP of the host +func LocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + for _, address := range addrs { + // check the address type and if it is not a loopback the display it + if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String() + } + } + } + return "" +} diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 4f5c7d0a16d..2a8f47f7f51 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/containers/common/libimage/manifests" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/retry" "github.com/containers/image/v5/copy" @@ -26,8 +27,10 @@ const ( ) // LookupReferenceFunc return an image reference based on the specified one. -// This can be used to pass custom blob caches to the copy operation. -type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error) +// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. +type LookupReferenceFunc = manifests.LookupReferenceFunc // CopyOptions allow for customizing image-copy operations. 
type CopyOptions struct { @@ -218,15 +221,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags - if options.Architecture != "" { - c.systemContext.ArchitectureChoice = options.Architecture - } - if options.OS != "" { - c.systemContext.OSChoice = options.OS - } - if options.Variant != "" { - c.systemContext.VariantChoice = options.Variant - } + c.systemContext.OSChoice, c.systemContext.ArchitectureChoice, c.systemContext.VariantChoice = NormalizePlatform(options.OS, options.Architecture, options.Variant) if options.SignaturePolicyPath != "" { c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath diff --git a/vendor/github.com/containers/common/libimage/download.go b/vendor/github.com/containers/common/libimage/download.go deleted file mode 100644 index 54edf1b9a4f..00000000000 --- a/vendor/github.com/containers/common/libimage/download.go +++ /dev/null @@ -1,46 +0,0 @@ -package libimage - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - - "github.com/pkg/errors" -) - -// tmpdir returns a path to a temporary directory. -func tmpdir() string { - tmpdir := os.Getenv("TMPDIR") - if tmpdir == "" { - tmpdir = "/var/tmp" - } - - return tmpdir -} - -// downloadFromURL downloads an image in the format "https:/example.com/myimage.tar" -// and temporarily saves in it $TMPDIR/importxyz, which is deleted after the image is imported -func (r *Runtime) downloadFromURL(source string) (string, error) { - fmt.Printf("Downloading from %q\n", source) - - outFile, err := ioutil.TempFile(r.systemContext.BigFilesTemporaryDir, "import") - if err != nil { - return "", errors.Wrap(err, "error creating file") - } - defer outFile.Close() - - response, err := http.Get(source) // nolint:noctx - if err != nil { - return "", errors.Wrapf(err, "error downloading %q", source) - } - defer response.Body.Close() - - _, err = io.Copy(outFile, response.Body) - if err != nil { - return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name()) - } - - return outFile.Name(), nil -} diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 833f940cca6..063f0714914 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -3,13 +3,14 @@ package libimage import ( "context" "fmt" - "path/filepath" + "path" "strconv" "strings" "time" filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" + "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,26 +19,53 @@ import ( // indicates that the image matches the criteria. type filterFunc func(*Image) (bool, error) +// Apply the specified filters. At least one filter of each key must apply. +func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) { + matches := false + for key := range filters { // and + matches = false + for _, filter := range filters[key] { // or + var err error + matches, err = filter(i) + if err != nil { + // Some images may have been corrupted in the + // meantime, so do an extra check and make the + // error non-fatal (see containers/podman/issues/12582). 
+ if errCorrupted := i.isCorrupted(""); errCorrupted != nil { + logrus.Errorf(errCorrupted.Error()) + return false, nil + } + return false, err + } + if matches { + break + } + } + if !matches { + return false, nil + } + } + return matches, nil +} + // filterImages returns a slice of images which are passing all specified // filters. -func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) { - if len(filters) == 0 { +func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *ListImagesOptions) ([]*Image, error) { + if len(options.Filters) == 0 || len(images) == 0 { return images, nil } + + filters, err := r.compileImageFilters(ctx, options) + if err != nil { + return nil, err + } result := []*Image{} for i := range images { - include := true - var err error - for _, filter := range filters { - include, err = filter(images[i]) - if err != nil { - return nil, err - } - if !include { - break - } + match, err := images[i].applyFilters(filters) + if err != nil { + return nil, err } - if include { + if match { result = append(result, images[i]) } } @@ -47,15 +75,29 @@ func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) { // compileImageFilters creates `filterFunc`s for the specified filters. The // required format is `key=value` with the following supported keys: // after, since, before, containers, dangling, id, label, readonly, reference, intermediate -func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) ([]filterFunc, error) { +func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOptions) (map[string][]filterFunc, error) { logrus.Tracef("Parsing image filters %s", options.Filters) - filterFuncs := []filterFunc{} - for _, filter := range options.Filters { + var tree *layerTree + getTree := func() (*layerTree, error) { + if tree == nil { + t, err := r.layerTree() + if err != nil { + return nil, err + } + tree = t + } + return tree, nil + } + + filters := map[string][]filterFunc{} + duplicate := map[string]string{} + for _, f := range options.Filters { var key, value string - split := strings.SplitN(filter, "=", 2) + var filter filterFunc + split := strings.SplitN(f, "=", 2) if len(split) != 2 { - return nil, errors.Errorf("invalid image filter %q: must be in the format %q", filter, "filter=value") + return nil, errors.Errorf("invalid image filter %q: must be in the format %q", f, "filter=value") } key = split[0] @@ -63,99 +105,187 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp switch key { case "after", "since": - img, _, err := r.LookupImage(value, nil) + img, err := r.time(key, value) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter %q", filter) + return nil, err } - filterFuncs = append(filterFuncs, filterAfter(img.Created())) + key = "since" + filter = filterAfter(img.Created()) case "before": - img, _, err := r.LookupImage(value, nil) + img, err := r.time(key, value) if err != nil { - return nil, errors.Wrapf(err, "could not find local image for filter %q", filter) + return nil, err } - filterFuncs = append(filterFuncs, filterBefore(img.Created())) + filter = filterBefore(img.Created()) case "containers": - switch value { - case "false", "true": - case "external": - if options.IsExternalContainerFunc == nil { - return nil, fmt.Errorf("libimage error: external containers filter without callback") - } - default: - return nil, fmt.Errorf("unsupported value %q for containers filter", value) + if err := 
r.containers(duplicate, key, value, options.IsExternalContainerFunc); err != nil { + return nil, err } - filterFuncs = append(filterFuncs, filterContainers(value, options.IsExternalContainerFunc)) + filter = filterContainers(value, options.IsExternalContainerFunc) case "dangling": - dangling, err := strconv.ParseBool(value) + dangling, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for dangling filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterDangling(ctx, dangling)) + t, err := getTree() + if err != nil { + return nil, err + } + + filter = filterDangling(ctx, dangling, t) case "id": - filterFuncs = append(filterFuncs, filterID(value)) + filter = filterID(value) case "intermediate": - intermediate, err := strconv.ParseBool(value) + intermediate, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for intermediate filter", value) + return nil, err + } + t, err := getTree() + if err != nil { + return nil, err } - filterFuncs = append(filterFuncs, filterIntermediate(ctx, intermediate)) + + filter = filterIntermediate(ctx, intermediate, t) case "label": - filterFuncs = append(filterFuncs, filterLabel(ctx, value)) + filter = filterLabel(ctx, value) case "readonly": - readOnly, err := strconv.ParseBool(value) + readOnly, err := r.bool(duplicate, key, value) + if err != nil { + return nil, err + } + filter = filterReadOnly(readOnly) + + case "manifest": + manifest, err := r.bool(duplicate, key, value) if err != nil { - return nil, errors.Wrapf(err, "non-boolean value %q for readonly filter", value) + return nil, err } - filterFuncs = append(filterFuncs, filterReadOnly(readOnly)) + filter = filterManifest(ctx, manifest) case "reference": - filterFuncs = append(filterFuncs, filterReference(value)) + filter = filterReferences(value) case "until": - ts, err := timetype.GetTimestamp(value, time.Now()) - if err != nil { - return nil, err - } - seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + until, err := r.until(value) if err != nil { return nil, err } - until := time.Unix(seconds, nanoseconds) - filterFuncs = append(filterFuncs, filterBefore(until)) + filter = filterBefore(until) default: return nil, errors.Errorf("unsupported image filter %q", key) } + filters[key] = append(filters[key], filter) + } + + return filters, nil +} + +func (r *Runtime) containers(duplicate map[string]string, key, value string, externalFunc IsExternalContainerFunc) error { + if exists, ok := duplicate[key]; ok && exists != value { + return errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + switch value { + case "false", "true": + case "external": + if externalFunc == nil { + return fmt.Errorf("libimage error: external containers filter without callback") + } + default: + return fmt.Errorf("unsupported value %q for containers filter", value) + } + return nil +} + +func (r *Runtime) until(value string) (time.Time, error) { + var until time.Time + ts, err := timetype.GetTimestamp(value, time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0) + if err != nil { + return until, err } + return time.Unix(seconds, nanoseconds), nil +} - return filterFuncs, nil +func (r *Runtime) time(key, value string) (*Image, error) { + img, _, err := r.LookupImage(value, nil) + if err != nil { + return nil, errors.Wrapf(err, "could not find local image 
for filter %q=%q", key, value) + } + return img, nil } -// filterReference creates a reference filter for matching the specified value. -func filterReference(value string) filterFunc { - // Replacing all '/' with '|' so that filepath.Match() can work '|' - // character is not valid in image name, so this is safe. - // - // TODO: this has been copied from Podman and requires some more review - // and especially tests. - filter := fmt.Sprintf("*%s*", value) - filter = strings.ReplaceAll(filter, "/", "|") +func (r *Runtime) bool(duplicate map[string]string, key, value string) (bool, error) { + if exists, ok := duplicate[key]; ok && exists != value { + return false, errors.Errorf("specifying %q filter more than once with different values is not supported", key) + } + duplicate[key] = value + set, err := strconv.ParseBool(value) + if err != nil { + return false, errors.Wrapf(err, "non-boolean value %q for %s filter", value, key) + } + return set, nil +} + +// filterManifest filters whether or not the image is a manifest list +func filterManifest(ctx context.Context, value bool) filterFunc { return func(img *Image) (bool, error) { - if len(value) < 1 { - return true, nil + isManifestList, err := img.IsManifestList(ctx) + if err != nil { + return false, err } - for _, name := range img.Names() { - newName := strings.ReplaceAll(name, "/", "|") - match, _ := filepath.Match(filter, newName) - if match { - return true, nil + return isManifestList == value, nil + } +} + +// filterReferences creates a reference filter for matching the specified value. +func filterReferences(value string) filterFunc { + return func(img *Image) (bool, error) { + refs, err := img.NamesReferences() + if err != nil { + return false, err + } + + for _, ref := range refs { + refString := ref.String() // FQN with tag/digest + candidates := []string{refString} + + // Split the reference into 3 components (twice if digested/tagged): + // 1) Fully-qualified reference + // 2) Without domain + // 3) Without domain and path + if named, isNamed := ref.(reference.Named); isNamed { + candidates = append(candidates, + reference.Path(named), // path/name without tag/digest (Path() removes it) + refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest + + trimmedString := reference.TrimNamed(named).String() + if refString != trimmedString { + tagOrDigest := refString[len(trimmedString):] + candidates = append(candidates, + trimmedString, // FQN without tag/digest + reference.Path(named)+tagOrDigest, // path/name with tag/digest + trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest + } + } + + for _, candidate := range candidates { + // path.Match() is also used by Docker's reference.FamiliarMatch(). + matched, _ := path.Match(value, candidate) + if matched { + return true, nil + } } } return false, nil @@ -221,9 +351,9 @@ func filterContainers(value string, fn IsExternalContainerFunc) filterFunc { } // filterDangling creates a dangling filter for matching the specified value. -func filterDangling(ctx context.Context, value bool) filterFunc { +func filterDangling(ctx context.Context, value bool, tree *layerTree) filterFunc { return func(img *Image) (bool, error) { - isDangling, err := img.IsDangling(ctx) + isDangling, err := img.isDangling(ctx, tree) if err != nil { return false, err } @@ -241,9 +371,9 @@ func filterID(value string) filterFunc { // filterIntermediate creates an intermediate filter for images.
An image is // considered to be an intermediate image if it is dangling (i.e., no tags) and // has no children (i.e., no other image depends on it). -func filterIntermediate(ctx context.Context, value bool) filterFunc { +func filterIntermediate(ctx context.Context, value bool, tree *layerTree) filterFunc { return func(img *Image) (bool, error) { - isIntermediate, err := img.IsIntermediate(ctx) + isIntermediate, err := img.isIntermediate(ctx, tree) if err != nil { return false, err } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 00a2d620ef4..661ca159b48 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -44,6 +44,8 @@ type Image struct { completeInspectData *ImageData // Corresponding OCI image. ociv1Image *ociv1.Image + // Names() parsed into references. + namesReferences []reference.Reference } } @@ -59,6 +61,7 @@ func (i *Image) reload() error { i.cached.partialInspectData = nil i.cached.completeInspectData = nil i.cached.ociv1Image = nil + i.cached.namesReferences = nil return nil } @@ -89,6 +92,23 @@ func (i *Image) Names() []string { return i.storageImage.Names } +// NamesReferences returns Names() as references. +func (i *Image) NamesReferences() ([]reference.Reference, error) { + if i.cached.namesReferences != nil { + return i.cached.namesReferences, nil + } + refs := make([]reference.Reference, 0, len(i.Names())) + for _, name := range i.Names() { + ref, err := reference.Parse(name) + if err != nil { + return nil, err + } + refs = append(refs, ref) + } + i.cached.namesReferences = refs + return refs, nil +} + // StorageImage returns the underlying storage.Image. func (i *Image) StorageImage() *storage.Image { return i.storageImage @@ -128,10 +148,16 @@ func (i *Image) IsReadOnly() bool { // IsDangling returns true if the image is dangling, that is an untagged image // without children. func (i *Image) IsDangling(ctx context.Context) (bool, error) { + return i.isDangling(ctx, nil) +} + +// isDangling returns true if the image is dangling, that is an untagged image +// without children. If tree is nil, it will created for this invocation only. +func (i *Image) isDangling(ctx context.Context, tree *layerTree) (bool, error) { if len(i.Names()) > 0 { return false, nil } - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, tree) if err != nil { return false, err } @@ -141,10 +167,17 @@ func (i *Image) IsDangling(ctx context.Context) (bool, error) { // IsIntermediate returns true if the image is an intermediate image, that is // an untagged image with children. func (i *Image) IsIntermediate(ctx context.Context) (bool, error) { + return i.isIntermediate(ctx, nil) +} + +// isIntermediate returns true if the image is an intermediate image, that is +// an untagged image with children. If tree is nil, it will created for this +// invocation only. +func (i *Image) isIntermediate(ctx context.Context, tree *layerTree) (bool, error) { if len(i.Names()) > 0 { return false, nil } - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, tree) if err != nil { return false, err } @@ -189,7 +222,7 @@ func (i *Image) Parent(ctx context.Context) (*Image, error) { // HasChildren returns indicates if the image has children. 
func (i *Image) HasChildren(ctx context.Context) (bool, error) { - children, err := i.getChildren(ctx, false) + children, err := i.getChildren(ctx, false, nil) if err != nil { return false, err } @@ -198,7 +231,7 @@ func (i *Image) HasChildren(ctx context.Context) (bool, error) { // Children returns the image's children. func (i *Image) Children(ctx context.Context) ([]*Image, error) { - children, err := i.getChildren(ctx, true) + children, err := i.getChildren(ctx, true, nil) if err != nil { return nil, err } @@ -206,13 +239,16 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) { } // getChildren returns a list of imageIDs that depend on the image. If all is -// false, only the first child image is returned. -func (i *Image) getChildren(ctx context.Context, all bool) ([]*Image, error) { - tree, err := i.runtime.layerTree() - if err != nil { - return nil, err +// false, only the first child image is returned. If tree is nil, it will be +// created for this invocation only. +func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) { + if tree == nil { + t, err := i.runtime.layerTree() + if err != nil { + return nil, err + } + tree = t } - return tree.children(ctx, i, all) } @@ -608,8 +644,7 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) { } // inRepoTags looks for the specified name/tag pair in the image's repo tags. -// Note that tag may be empty. -func (i *Image) inRepoTags(name, tag string) (reference.Named, error) { +func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) { repoTags, err := i.NamedRepoTags() if err != nil { return nil, err @@ -620,8 +655,10 @@ func (i *Image) inRepoTags(name, tag string) (reference.Named, error) { return nil, err } + name := namedTagged.Name() + tag := namedTagged.Tag() for _, pair := range pairs { - if tag != "" && tag != pair.Tag { + if tag != pair.Tag { continue } if !strings.HasSuffix(pair.Name, name) { diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index bcfb4e1295f..67ab654b2ca 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -2,9 +2,11 @@ package libimage import ( "context" + "fmt" "net/url" "os" + "github.com/containers/common/pkg/download" storageTransport "github.com/containers/image/v5/storage" tarballTransport "github.com/containers/image/v5/tarball" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -61,7 +63,8 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption u, err := url.ParseRequestURI(path) if err == nil && u.Scheme != "" { // If source is a URL, download the file. - file, err := r.downloadFromURL(path) + fmt.Printf("Downloading from %q\n", path) + file, err := download.FromURL(r.systemContext.BigFilesTemporaryDir, path) if err != nil { return "", err } diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index a872e5cf9d1..d44ebf46ef9 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -50,19 +50,39 @@ type RootFS struct { Layers []digest.Digest `json:"Layers"` } -// Inspect inspects the image. Use `withSize` to also perform the -// comparatively expensive size computation of the image. 
-func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) { +// InspectOptions allow for customizing inspecting images. +type InspectOptions struct { + // Compute the size of the image (expensive). + WithSize bool + // Compute the parent of the image (expensive). + WithParent bool +} + +// Inspect inspects the image. +func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageData, error) { logrus.Debugf("Inspecting image %s", i.ID()) + if options == nil { + options = &InspectOptions{} + } + if i.cached.completeInspectData != nil { - if withSize && i.cached.completeInspectData.Size == int64(-1) { + if options.WithSize && i.cached.completeInspectData.Size == int64(-1) { size, err := i.Size() if err != nil { return nil, err } i.cached.completeInspectData.Size = size } + if options.WithParent && i.cached.completeInspectData.Parent == "" { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + i.cached.completeInspectData.Parent = parentImage.ID() + } + } return i.cached.completeInspectData, nil } @@ -75,10 +95,7 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) if err != nil { return nil, err } - parentImage, err := i.Parent(ctx) - if err != nil { - return nil, err - } + repoTags, err := i.RepoTags() if err != nil { return nil, err @@ -93,7 +110,7 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) } size := int64(-1) - if withSize { + if options.WithSize { size, err = i.Size() if err != nil { return nil, err @@ -124,8 +141,14 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) NamesHistory: i.NamesHistory(), } - if parentImage != nil { - data.Parent = parentImage.ID() + if options.WithParent { + parentImage, err := i.Parent(ctx) + if err != nil { + return nil, err + } + if parentImage != nil { + data.Parent = parentImage.ID() + } } // Determine the format of the image. How we determine certain data @@ -164,7 +187,12 @@ func (i *Image) Inspect(ctx context.Context, withSize bool) (*ImageData, error) return nil, err } data.Comment = dockerManifest.Comment + // NOTE: Health checks may be listed in the container config or + // the config. 
data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck + if data.HealthCheck == nil { + data.HealthCheck = dockerManifest.Config.Healthcheck + } } if data.Annotations == nil { diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 74a1870e0ff..4dfac710654 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -35,17 +35,6 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( var loadErrors []error for _, f := range []func() ([]string, string, error){ - // DOCKER-ARCHIVE - must be first (see containers/podman/issues/10809) - func() ([]string, string, error) { - logrus.Debugf("-> Attempting to load %q as a Docker archive", path) - ref, err := dockerArchiveTransport.ParseReference(path) - if err != nil { - return nil, dockerArchiveTransport.Transport.Name(), err - } - images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) - return images, dockerArchiveTransport.Transport.Name(), err - }, - // OCI func() ([]string, string, error) { logrus.Debugf("-> Attempting to load %q as an OCI directory", path) @@ -68,6 +57,17 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( return images, ociArchiveTransport.Transport.Name(), err }, + // DOCKER-ARCHIVE + func() ([]string, string, error) { + logrus.Debugf("-> Attempting to load %q as a Docker archive", path) + ref, err := dockerArchiveTransport.ParseReference(path) + if err != nil { + return nil, dockerArchiveTransport.Transport.Name(), err + } + images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) + return images, dockerArchiveTransport.Transport.Name(), err + }, + // DIR func() ([]string, string, error) { logrus.Debugf("-> Attempting to load %q as a Docker dir", path) diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 8d1abfba9a3..ccff908c943 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -27,6 +27,12 @@ import ( const instancesData = "instances.json" +// LookupReferenceFunc return an image reference based on the specified one. +// The returned reference can return custom ImageSource or ImageDestination +// objects which intercept or filter blobs, manifests, and signatures as +// they are read and written. +type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error) + // ErrListImageUnknown is returned when we attempt to create an image reference // for a List that has not yet been saved to an image. 
var ErrListImageUnknown = stderrors.New("unable to determine which image holds the manifest list") @@ -57,6 +63,7 @@ type PushOptions struct { SignBy string // fingerprint of GPG key to use to sign images RemoveSignatures bool // true to discard signatures in images ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 + SourceFilter LookupReferenceFunc // filter the list source } // Create creates a new list containing information about the specified image, @@ -221,6 +228,11 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push if err != nil { return nil, "", err } + if options.SourceFilter != nil { + if src, err = options.SourceFilter(src); err != nil { + return nil, "", err + } + } copyOptions := &cp.Options{ ImageListSelection: options.ImageListSelection, Instances: options.Instances, @@ -353,9 +365,12 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag } if instanceInfo.OS == "" { instanceInfo.OS = config.OS + instanceInfo.OSVersion = config.OSVersion + instanceInfo.OSFeatures = config.OSFeatures } if instanceInfo.Architecture == "" { instanceInfo.Architecture = config.Architecture + instanceInfo.Variant = config.Variant } } manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index bfea807c8b0..7ceb6283063 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,13 +1,51 @@ package libimage import ( + "runtime" "strings" + "github.com/containerd/containerd/platforms" "github.com/containers/image/v5/docker/reference" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +// NormalizePlatform normalizes (according to the OCI spec) the specified os, +// arch and variant. If left empty, the individual item will not be normalized. +func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { + os, arch, variant = rawOS, rawArch, rawVariant + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + rawPlatform := os + "/" + arch + if variant != "" { + rawPlatform += "/" + variant + } + + normalizedPlatform, err := platforms.Parse(rawPlatform) + if err != nil { + logrus.Debugf("Error normalizing platform: %v", err) + return rawOS, rawArch, rawVariant + } + logrus.Debugf("Normalized platform %s to %s", rawPlatform, normalizedPlatform) + os = rawOS + if rawOS != "" { + os = normalizedPlatform.OS + } + arch = rawArch + if rawArch != "" { + arch = normalizedPlatform.Architecture + } + variant = rawVariant + if rawVariant != "" { + variant = normalizedPlatform.Variant + } + return os, arch, variant +} + // NormalizeName normalizes the provided name according to the conventions by // Podman and Buildah. If tag and digest are missing, the "latest" tag will be // used. If it's a short name, it will be prefixed with "localhost/". 
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 1c322c37e84..ff93b6ed888 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "runtime" "strings" "time" @@ -21,6 +22,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage" digest "github.com/opencontainers/go-digest" + ociSpec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -169,6 +171,20 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP return localImages, pullError } +// nameFromAnnotations returns a reference string to be used as an image name, +// or an empty string. The annotations map may be nil. +func nameFromAnnotations(annotations map[string]string) string { + if annotations == nil { + return "" + } + // buildkit/containerd are using a custom annotation see + // containers/podman/issues/12560. + if annotations["io.containerd.image.name"] != "" { + return annotations["io.containerd.image.name"] + } + return annotations[ociSpec.AnnotationRefName] +} + // copyFromDefault is the default copier for a number of transports. Other // transports require some specific dancing, sometimes Yoga. func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) { @@ -201,15 +217,16 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, err } - // if index.json has no reference name, compute the image ID instead - if manifestDescriptor.Annotations == nil || manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] == "" { + storageName = nameFromAnnotations(manifestDescriptor.Annotations) + switch len(storageName) { + case 0: + // If there's no reference name in the annotations, compute an ID. storageName, err = getImageID(ctx, ref, nil) if err != nil { return nil, err } imageName = "sha256:" + storageName[1:] - } else { - storageName = manifestDescriptor.Annotations["org.opencontainers.image.ref.name"] + default: named, err := NormalizeName(storageName) if err != nil { return nil, err @@ -227,8 +244,14 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, imageName = named.String() default: - storageName = toLocalImageName(ref.StringWithinTransport()) - imageName = storageName + // Path-based transports (e.g., dir) may include invalid + // characters, so we should pessimistically generate an ID + // instead of looking at the StringWithinTransport(). + storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] } // Create a storage reference. @@ -424,12 +447,18 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str // If there's already a local image "localhost/foo", then we should // attempt pulling that instead of doing the full short-name dance. // - // NOTE: we must ignore the platform of a local image when doing - // lookups here, even if arch/os/variant is set. Some images set an - // incorrect or even invalid platform (see containers/podman/issues/10682). - // Doing the lookup while ignoring the platform checks prevents - // redundantly downloading the same image. 
- localImage, resolvedImageName, err = r.LookupImage(imageName, nil) + // NOTE that we only do platform checks if the specified values differ + // from the local platform. Unfortunately, there are many images used + // in the wild which don't set the correct value(s) in the config + // causing various issues such as containers/podman/issues/10682. + lookupImageOptions := &LookupImageOptions{Variant: options.Variant} + if options.Architecture != runtime.GOARCH { + lookupImageOptions.Architecture = options.Architecture + } + if options.OS != runtime.GOOS { + lookupImageOptions.OS = options.OS + } + localImage, resolvedImageName, err = r.LookupImage(imageName, lookupImageOptions) if err != nil && errors.Cause(err) != storage.ErrImageUnknown { logrus.Errorf("Looking up %s in local storage: %v", imageName, err) } @@ -442,45 +471,26 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str } } - customPlatform := false - if len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 { - customPlatform = true - // Unless the pull policy is "always", we must pessimistically assume - // that the local image has an invalid architecture (see - // containers/podman/issues/10682). Hence, whenever the user requests - // a custom platform, set the pull policy to "always" to make sure - // we're pulling down the image. + customPlatform := len(options.Architecture)+len(options.OS)+len(options.Variant) > 0 + if customPlatform && pullPolicy != config.PullPolicyAlways && pullPolicy != config.PullPolicyNever { + // Unless the pull policy is always/never, we must + // pessimistically assume that the local image has an invalid + // architecture (see containers/podman/issues/10682). Hence, + // whenever the user requests a custom platform, set the pull + // policy to "newer" to make sure we're pulling down the + // correct image. // - // NOTE that this is will even override --pull={false,never}. This is - // very likely a bug but a consistent one in Podman/Buildah and should - // be addressed at a later point. - if pullPolicy != config.PullPolicyAlways { - switch { - // User input clearly refer to a local image. - case strings.HasPrefix(imageName, "localhost/"): - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "never", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyNever - - // Image resolved to a local one, so let's still have a - // look at the registries or aliases but use it - // otherwise. - case strings.HasPrefix(resolvedImageName, "localhost/"): - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "newer", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyNewer - - default: - logrus.Debugf("Enforcing pull policy to %q to support custom platform (arch: %q, os: %q, variant: %q)", "always", options.Architecture, options.OS, options.Variant) - pullPolicy = config.PullPolicyAlways - } - } + // NOTE that this is will even override --pull={false,never}. 
+ pullPolicy = config.PullPolicyNewer + logrus.Debugf("Enforcing pull policy to %q to pull custom platform (arch: %q, os: %q, variant: %q) - local image may mistakenly specify wrong platform", pullPolicy, options.Architecture, options.OS, options.Variant) } if pullPolicy == config.PullPolicyNever { if localImage != nil { - logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) + logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) return []string{resolvedImageName}, nil } - logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName) + logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName) return nil, errors.Wrap(storage.ErrImageUnknown, imageName) } @@ -518,6 +528,12 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str sys := r.systemContextCopy() resolved, err := shortnames.Resolve(sys, imageName) if err != nil { + // TODO: that is a too big of a hammer since we should only + // ignore errors that indicate that there's no alias and no + // USRs. Must be addressed in c/image first. + if localImage != nil && pullPolicy == config.PullPolicyNewer { + return []string{resolvedImageName}, nil + } return nil, err } diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 7f25df20053..86e7eee5638 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -21,6 +21,16 @@ import ( // Faster than the standard library, see https://github.com/json-iterator/go. var json = jsoniter.ConfigCompatibleWithStandardLibrary +// tmpdir returns a path to a temporary directory. +func tmpdir() string { + tmpdir := os.Getenv("TMPDIR") + if tmpdir == "" { + tmpdir = "/var/tmp" + } + + return tmpdir +} + // RuntimeOptions allow for creating a customized Runtime. type RuntimeOptions struct { // The base system context of the runtime which will be used throughout @@ -243,6 +253,8 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, if options.Variant == "" { options.Variant = r.systemContext.VariantChoice } + // Normalize platform to be OCI compatible (e.g., "aarch64" -> "arm64"). + options.OS, options.Architecture, options.Variant = NormalizePlatform(options.OS, options.Architecture, options.Variant) // First, check if we have an exact match in the storage. Maybe an ID // or a fully-qualified image name. @@ -379,41 +391,49 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupIm return nil, "", err } - if !shortnames.IsShortName(name) { - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, "", err - } - digested, hasDigest := named.(reference.Digested) - if !hasDigest { - return nil, "", errors.Wrap(storage.ErrImageUnknown, name) - } + ref, err := reference.Parse(name) // Warning! 
This is not ParseNormalizedNamed + if err != nil { + return nil, "", err + } + named, isNamed := ref.(reference.Named) + if !isNamed { + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + digested, isDigested := named.(reference.Digested) + if isDigested { logrus.Debug("Looking for image with matching recorded digests") digest := digested.Digest() for _, image := range allImages { for _, d := range image.Digests() { - if d == digest { - return image, name, nil + if d != digest { + continue + } + // Also make sure that the matching image fits all criteria (e.g., manifest list). + if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil { + return nil, "", err } + return image, name, nil + } } + return nil, "", errors.Wrap(storage.ErrImageUnknown, name) + } + if !shortnames.IsShortName(name) { return nil, "", errors.Wrap(storage.ErrImageUnknown, name) } - // Podman compat: if we're looking for a short name but couldn't - // resolve it via the registries.conf dance, we need to look at *all* - // images and check if the name we're looking for matches a repo tag. - // Split the name into a repo/tag pair - split := strings.SplitN(name, ":", 2) - repo := split[0] - tag := "" - if len(split) == 2 { - tag = split[1] + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + namedTagged, isNammedTagged := named.(reference.NamedTagged) + if !isNammedTagged { + // NOTE: this should never happen since we already know it's + // not a digested reference. + return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown) } + for _, image := range allImages { - named, err := image.inRepoTags(repo, tag) + named, err := image.inRepoTags(namedTagged) if err != nil { return nil, "", err } @@ -477,13 +497,16 @@ func (r *Runtime) imageReferenceMatchesContext(ref types.ImageReference, options } if options.Architecture != "" && options.Architecture != data.Architecture { - return false, err + logrus.Debugf("architecture %q does not match architecture %q of image %s", options.Architecture, data.Architecture, ref) + return false, nil } if options.OS != "" && options.OS != data.Os { - return false, err + logrus.Debugf("OS %q does not match OS %q of image %s", options.OS, data.Os, ref) + return false, nil } if options.Variant != "" && options.Variant != data.Variant { - return false, err + logrus.Debugf("variant %q does not match variant %q of image %s", options.Variant, data.Variant, ref) + return false, nil } return true, nil @@ -539,16 +562,7 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI } } - var filters []filterFunc - if len(options.Filters) > 0 { - compiledFilters, err := r.compileImageFilters(ctx, options) - if err != nil { - return nil, err - } - filters = append(filters, compiledFilters...) - } - - return filterImages(images, filters) + return r.filterImages(ctx, images, options) } // RemoveImagesOptions allow for customizing image removal. diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index df29bc7da7a..33a4776ce3f 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -58,6 +58,10 @@ type SearchOptions struct { InsecureSkipTLSVerify types.OptionalBool // ListTags returns the search result with available tags ListTags bool + // Registries to search if the specified term does not include a + // registry. 
If set, the unqualified-search registries in + // containers-registries.conf(5) are ignored. + Registries []string } // SearchFilter allows filtering images while searching. @@ -105,6 +109,10 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) { return sFilter, nil } +// Search searches term. If term includes a registry, only this registry will +// be used for searching. Otherwise, the unqualified-search registries in +// containers-registries.conf(5) or the ones specified in the options will be +// used. func (r *Runtime) Search(ctx context.Context, term string, options *SearchOptions) ([]SearchResult, error) { if options == nil { options = &SearchOptions{} @@ -117,10 +125,14 @@ func (r *Runtime) Search(ctx context.Context, term string, options *SearchOption // that we cannot use the reference parser from the containers/image // library as the search term may container arbitrary input such as // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629. - if spl := strings.SplitN(term, "/", 2); len(spl) > 1 { - searchRegistries = append(searchRegistries, spl[0]) + spl := strings.SplitN(term, "/", 2) + switch { + case len(spl) > 1: + searchRegistries = []string{spl[0]} term = spl[1] - } else { + case len(options.Registries) > 0: + searchRegistries = options.Registries + default: regs, err := sysregistriesv2.UnqualifiedSearchRegistries(r.systemContextCopy()) if err != nil { return nil, err @@ -244,7 +256,7 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri name = index + "/library/" + results[i].Name } params := SearchResult{ - Index: index, + Index: registry, Name: name, Description: description, Official: official, @@ -284,8 +296,9 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr paramsArr := []SearchResult{} for i := 0; i < limit; i++ { params := SearchResult{ - Name: imageRef.DockerReference().Name(), - Tag: tags[i], + Name: imageRef.DockerReference().Name(), + Tag: tags[i], + Index: registry, } paramsArr = append(paramsArr, params) } diff --git a/vendor/github.com/containers/common/libnetwork/cni/README.md b/vendor/github.com/containers/common/libnetwork/cni/README.md new file mode 100644 index 00000000000..6f57feff51b --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/README.md @@ -0,0 +1,10 @@ +This package abstracts CNI from libpod. +It implements the `ContainerNetwork` interface defined in [libpod/network/types/network.go](../types/network.go) for the CNI backend. + + +## Testing +Run the tests with: +``` +go test -v -mod=vendor -cover ./libpod/network/cni/ +``` +Run the tests as root to also test setup/teardown. This will execute CNI and therefore the cni plugins have to be installed. 
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile b/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile new file mode 100644 index 00000000000..d302f441c76 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni.coverprofile @@ -0,0 +1,483 @@ +mode: count +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:25.110,36.16 4 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:39.2,39.37 1 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:47.2,48.16 2 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:51.2,57.34 5 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:109.2,111.22 2 174 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:36.16,38.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:39.37,40.51 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:40.51,44.4 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:48.16,50.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:58.33,61.17 3 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:64.3,67.19 2 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:72.3,72.22 1 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:75.3,75.23 1 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:79.3,80.17 2 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:84.34,87.17 3 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:90.3,93.23 2 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:97.3,98.17 2 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:102.10,105.39 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:61.17,63.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:67.19,69.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:72.22,74.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:75.23,77.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:80.17,82.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:87.17,89.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:93.23,95.4 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:98.17,100.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:114.74,115.33 1 174 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:120.2,120.14 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:115.33,116.34 1 678 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:116.34,118.4 1 96 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:125.95,126.45 1 175 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:131.2,131.50 1 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:135.2,136.32 2 137 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:194.2,194.12 1 136 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:126.45,129.3 2 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:131.50,133.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:136.32,137.26 1 156 
+github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:137.26,143.18 3 156 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:146.4,150.26 3 156 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:167.4,171.29 4 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:177.4,177.27 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:183.4,183.44 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:188.4,188.32 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:191.4,191.48 1 155 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:143.18,145.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:150.26,152.23 2 118 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:156.5,157.20 2 117 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:152.23,154.6 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:157.20,159.6 1 97 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:160.10,160.32 1 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:160.32,163.19 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:163.19,165.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:171.29,173.26 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:173.26,175.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:177.27,179.24 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:179.24,181.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:183.44,187.5 3 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:188.32,190.5 1 20 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:198.96,199.35 1 38 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:210.2,210.12 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:199.35,200.54 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:200.54,202.29 2 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:207.4,207.17 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:202.29,203.32 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:203.32,205.6 1 19 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:216.138,223.30 2 87 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:238.2,240.36 3 87 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:259.2,261.22 3 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:266.2,269.24 3 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:289.2,291.16 3 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:294.2,295.17 2 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:310.2,311.16 2 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:314.2,314.33 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:223.30,224.42 1 84 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:233.3,233.54 1 84 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:224.42,226.18 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:229.4,231.68 3 91 
+github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:226.18,228.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:234.8,236.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:240.36,241.12 1 8 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:242.14,244.18 2 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:248.15,250.18 2 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:254.11,255.69 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:244.18,246.5 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:250.18,252.5 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:261.22,264.3 2 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:270.33,274.62 3 78 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:279.3,279.18 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:283.34,284.87 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:286.10,287.85 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:274.62,277.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:279.18,281.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:291.16,293.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:295.17,298.17 3 34 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:301.3,302.17 2 34 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:305.3,306.75 2 34 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:298.17,300.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:302.17,304.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:307.8,309.3 1 48 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:311.16,313.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:318.40,319.15 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:322.2,323.16 2 4 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:326.2,326.11 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:329.2,329.15 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:319.15,321.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:323.16,325.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:326.11,328.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:333.42,334.16 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:337.2,338.16 2 3 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:341.2,341.23 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:344.2,344.15 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:334.16,336.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:338.16,340.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:341.23,343.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:347.90,349.29 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:377.2,377.22 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:349.29,350.26 1 0 
+github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:353.3,355.38 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:350.26,352.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:355.38,356.72 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:359.4,366.41 3 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:356.72,358.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_conversion.go:366.41,374.5 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:47.41,49.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:54.2,54.21 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:57.2,57.12 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:49.17,51.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:51.8,51.23 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:51.23,53.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:54.21,56.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:61.122,71.16 9 688 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:74.2,74.28 1 688 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:71.16,73.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:78.88,83.22 3 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:92.2,92.14 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:83.22,84.23 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:84.23,86.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:86.9,88.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:89.8,89.63 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:89.63,91.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_exec.go:96.77,98.2 1 688 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:123.80,128.21 5 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:131.2,131.22 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:134.2,134.19 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:137.2,137.10 1 82 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:128.21,130.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:131.22,133.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:134.19,136.3 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:141.119,155.54 4 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:158.2,158.16 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:155.54,157.3 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:162.97,170.2 3 84 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:173.113,179.23 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:188.2,188.15 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:191.2,191.18 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:179.23,180.32 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:183.3,183.30 1 3 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:180.32,182.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:183.30,185.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:188.15,190.3 1 89 
+github.com/containers/podman/v3/libpod/network/cni/cni_types.go:196.43,198.2 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:202.58,204.12 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:207.2,208.16 2 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:211.2,211.29 1 91 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:204.12,206.3 1 6 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:208.16,210.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:216.39,224.2 4 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:227.41,231.2 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:234.37,238.2 1 78 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:242.56,250.2 3 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:253.44,254.26 1 79 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:259.2,259.14 1 0 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:254.26,255.65 1 79 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:255.65,257.4 1 79 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:263.78,268.13 2 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:273.2,273.21 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:276.2,279.50 3 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:282.2,282.10 1 4 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:268.13,270.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:273.21,275.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:279.50,281.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/cni_types.go:285.51,292.2 3 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:20.78,24.16 4 55 +github.com/containers/podman/v3/libpod/network/cni/config.go:27.2,28.16 2 55 +github.com/containers/podman/v3/libpod/network/cni/config.go:32.2,33.32 2 34 +github.com/containers/podman/v3/libpod/network/cni/config.go:24.16,26.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:28.16,30.3 1 21 +github.com/containers/podman/v3/libpod/network/cni/config.go:38.97,40.29 1 103 +github.com/containers/podman/v3/libpod/network/cni/config.go:46.2,46.25 1 103 +github.com/containers/podman/v3/libpod/network/cni/config.go:50.2,50.30 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:53.2,53.31 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:56.2,56.35 1 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:60.2,63.27 3 102 +github.com/containers/podman/v3/libpod/network/cni/config.go:85.2,86.17 2 99 +github.com/containers/podman/v3/libpod/network/cni/config.go:93.2,93.27 1 99 +github.com/containers/podman/v3/libpod/network/cni/config.go:112.2,112.36 1 93 +github.com/containers/podman/v3/libpod/network/cni/config.go:123.2,126.87 2 87 +github.com/containers/podman/v3/libpod/network/cni/config.go:131.2,132.16 2 87 +github.com/containers/podman/v3/libpod/network/cni/config.go:135.2,135.79 1 82 +github.com/containers/podman/v3/libpod/network/cni/config.go:40.29,42.3 1 27 +github.com/containers/podman/v3/libpod/network/cni/config.go:46.25,48.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:50.30,52.3 1 101 +github.com/containers/podman/v3/libpod/network/cni/config.go:53.31,55.3 1 94 +github.com/containers/podman/v3/libpod/network/cni/config.go:56.35,58.3 1 102 
+github.com/containers/podman/v3/libpod/network/cni/config.go:63.27,64.53 1 56 +github.com/containers/podman/v3/libpod/network/cni/config.go:67.3,67.47 1 54 +github.com/containers/podman/v3/libpod/network/cni/config.go:64.53,66.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:67.47,69.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:70.8,72.17 2 46 +github.com/containers/podman/v3/libpod/network/cni/config.go:75.3,75.25 1 46 +github.com/containers/podman/v3/libpod/network/cni/config.go:72.17,74.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:86.17,88.17 2 51 +github.com/containers/podman/v3/libpod/network/cni/config.go:88.17,90.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:94.33,96.54 1 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:99.3,100.17 2 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:103.34,105.17 2 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:108.10,109.93 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:96.54,98.4 1 34 +github.com/containers/podman/v3/libpod/network/cni/config.go:100.17,102.4 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:105.17,107.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:112.36,114.17 2 97 +github.com/containers/podman/v3/libpod/network/cni/config.go:117.3,117.51 1 91 +github.com/containers/podman/v3/libpod/network/cni/config.go:114.17,116.4 1 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:117.51,119.4 1 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:126.87,129.3 2 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:132.16,134.3 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:140.59,144.16 4 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:148.2,149.16 2 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:154.2,154.48 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:159.2,159.59 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:170.2,173.24 3 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:144.16,146.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:149.16,151.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:154.48,156.3 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:159.59,161.17 2 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:161.17,164.18 2 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:164.18,166.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:179.88,183.16 4 15 +github.com/containers/podman/v3/libpod/network/cni/config.go:187.2,189.33 2 15 +github.com/containers/podman/v3/libpod/network/cni/config.go:198.2,198.22 1 15 +github.com/containers/podman/v3/libpod/network/cni/config.go:183.16,185.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:189.33,190.34 1 120 +github.com/containers/podman/v3/libpod/network/cni/config.go:196.3,196.46 1 52 +github.com/containers/podman/v3/libpod/network/cni/config.go:190.34,192.31 1 99 +github.com/containers/podman/v3/libpod/network/cni/config.go:192.31,193.19 1 68 +github.com/containers/podman/v3/libpod/network/cni/config.go:202.77,206.16 4 18 +github.com/containers/podman/v3/libpod/network/cni/config.go:210.2,211.16 2 18 +github.com/containers/podman/v3/libpod/network/cni/config.go:214.2,214.32 1 16 
+github.com/containers/podman/v3/libpod/network/cni/config.go:206.16,208.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:211.16,213.3 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:217.50,218.22 1 6 +github.com/containers/podman/v3/libpod/network/cni/config.go:221.2,221.36 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:230.2,230.31 1 4 +github.com/containers/podman/v3/libpod/network/cni/config.go:235.2,235.12 1 4 +github.com/containers/podman/v3/libpod/network/cni/config.go:218.22,220.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:221.36,223.17 2 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:226.3,226.71 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:223.17,225.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:226.71,228.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:230.31,232.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:232.8,234.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:238.92,239.36 1 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:255.2,255.31 1 89 +github.com/containers/podman/v3/libpod/network/cni/config.go:264.2,264.25 1 89 +github.com/containers/podman/v3/libpod/network/cni/config.go:290.2,291.12 2 89 +github.com/containers/podman/v3/libpod/network/cni/config.go:239.36,241.63 2 87 +github.com/containers/podman/v3/libpod/network/cni/config.go:244.3,244.62 1 85 +github.com/containers/podman/v3/libpod/network/cni/config.go:241.63,243.4 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:244.62,246.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:247.8,250.17 3 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:250.17,252.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:255.31,257.17 2 24 +github.com/containers/podman/v3/libpod/network/cni/config.go:260.3,260.57 1 24 +github.com/containers/podman/v3/libpod/network/cni/config.go:257.17,259.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:264.25,267.42 3 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:275.3,275.12 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:282.3,282.12 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:267.42,268.37 1 7 +github.com/containers/podman/v3/libpod/network/cni/config.go:271.4,271.37 1 7 +github.com/containers/podman/v3/libpod/network/cni/config.go:268.37,270.5 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:271.37,273.5 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:275.12,277.18 2 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:280.4,280.58 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:277.18,279.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:282.12,284.18 2 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:287.4,287.58 1 3 +github.com/containers/podman/v3/libpod/network/cni/config.go:284.18,286.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:298.88,299.14 1 97 +github.com/containers/podman/v3/libpod/network/cni/config.go:302.2,302.24 1 97 +github.com/containers/podman/v3/libpod/network/cni/config.go:309.2,310.16 2 96 +github.com/containers/podman/v3/libpod/network/cni/config.go:315.2,315.59 1 96 +github.com/containers/podman/v3/libpod/network/cni/config.go:319.2,320.22 2 94 
+github.com/containers/podman/v3/libpod/network/cni/config.go:331.2,331.25 1 93 +github.com/containers/podman/v3/libpod/network/cni/config.go:339.2,339.12 1 91 +github.com/containers/podman/v3/libpod/network/cni/config.go:299.14,301.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:302.24,304.3 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:310.16,312.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:315.59,317.3 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:320.22,321.36 1 2 +github.com/containers/podman/v3/libpod/network/cni/config.go:321.36,323.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:324.8,324.23 1 92 +github.com/containers/podman/v3/libpod/network/cni/config.go:324.23,326.17 2 90 +github.com/containers/podman/v3/libpod/network/cni/config.go:329.3,329.17 1 90 +github.com/containers/podman/v3/libpod/network/cni/config.go:326.17,328.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/config.go:331.25,332.78 1 5 +github.com/containers/podman/v3/libpod/network/cni/config.go:335.3,335.74 1 4 +github.com/containers/podman/v3/libpod/network/cni/config.go:332.78,334.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/config.go:335.74,337.4 1 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:74.78,77.16 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:81.2,82.30 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:86.2,87.25 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:90.2,91.16 2 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:95.2,106.15 3 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:77.16,79.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:82.30,84.3 1 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:87.25,89.3 1 68 +github.com/containers/podman/v3/libpod/network/cni/network.go:91.16,93.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:109.43,111.23 1 93 +github.com/containers/podman/v3/libpod/network/cni/network.go:115.2,116.16 2 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:119.2,120.29 2 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:160.2,160.39 1 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:168.2,170.12 3 67 +github.com/containers/podman/v3/libpod/network/cni/network.go:111.23,113.3 1 26 +github.com/containers/podman/v3/libpod/network/cni/network.go:116.16,118.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:120.29,122.17 2 180 +github.com/containers/podman/v3/libpod/network/cni/network.go:130.3,130.47 1 177 +github.com/containers/podman/v3/libpod/network/cni/network.go:135.3,135.86 1 176 +github.com/containers/podman/v3/libpod/network/cni/network.go:140.3,140.41 1 176 +github.com/containers/podman/v3/libpod/network/cni/network.go:145.3,146.17 2 175 +github.com/containers/podman/v3/libpod/network/cni/network.go:150.3,156.36 3 174 +github.com/containers/podman/v3/libpod/network/cni/network.go:122.17,124.27 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:127.4,127.12 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:124.27,126.5 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:130.47,132.12 2 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:135.86,137.12 2 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:140.41,142.12 2 1 
+github.com/containers/podman/v3/libpod/network/cni/network.go:146.17,148.12 2 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:160.39,162.17 2 48 +github.com/containers/podman/v3/libpod/network/cni/network.go:165.3,165.43 1 48 +github.com/containers/podman/v3/libpod/network/cni/network.go:162.17,164.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:173.63,183.2 2 48 +github.com/containers/podman/v3/libpod/network/cni/network.go:190.68,192.41 1 23 +github.com/containers/podman/v3/libpod/network/cni/network.go:196.2,197.33 2 5 +github.com/containers/podman/v3/libpod/network/cni/network.go:210.2,210.16 1 4 +github.com/containers/podman/v3/libpod/network/cni/network.go:213.2,213.106 1 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:192.41,194.3 1 18 +github.com/containers/podman/v3/libpod/network/cni/network.go:197.33,199.37 1 9 +github.com/containers/podman/v3/libpod/network/cni/network.go:203.3,203.52 1 9 +github.com/containers/podman/v3/libpod/network/cni/network.go:199.37,201.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:203.52,204.18 1 5 +github.com/containers/podman/v3/libpod/network/cni/network.go:207.4,207.13 1 4 +github.com/containers/podman/v3/libpod/network/cni/network.go:204.18,206.5 1 1 +github.com/containers/podman/v3/libpod/network/cni/network.go:210.16,212.3 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:218.47,221.2 2 262 +github.com/containers/podman/v3/libpod/network/cni/network.go:224.97,233.6 2 25 +github.com/containers/podman/v3/libpod/network/cni/network.go:233.6,234.103 1 27 +github.com/containers/podman/v3/libpod/network/cni/network.go:240.3,242.17 3 2 +github.com/containers/podman/v3/libpod/network/cni/network.go:234.103,239.4 2 25 +github.com/containers/podman/v3/libpod/network/cni/network.go:242.17,244.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:249.97,251.29 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:264.2,264.60 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:251.29,254.17 2 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:257.3,257.104 1 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:254.17,256.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:257.104,262.4 2 3 +github.com/containers/podman/v3/libpod/network/cni/network.go:269.61,272.33 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:278.2,279.16 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:282.2,282.45 1 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:272.33,273.40 1 64 +github.com/containers/podman/v3/libpod/network/cni/network.go:273.40,275.4 1 65 +github.com/containers/podman/v3/libpod/network/cni/network.go:279.16,281.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:287.58,291.16 4 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:294.2,300.31 5 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:307.2,307.78 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:291.16,293.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/network.go:300.31,302.48 2 57 +github.com/containers/podman/v3/libpod/network/cni/network.go:302.48,305.4 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:312.53,314.33 2 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:317.2,317.14 1 51 +github.com/containers/podman/v3/libpod/network/cni/network.go:314.33,316.3 
1 64 +github.com/containers/podman/v3/libpod/network/cni/network.go:322.57,324.33 2 138 +github.com/containers/podman/v3/libpod/network/cni/network.go:329.2,329.14 1 138 +github.com/containers/podman/v3/libpod/network/cni/network.go:324.33,325.56 1 115 +github.com/containers/podman/v3/libpod/network/cni/network.go:325.56,327.4 1 111 +github.com/containers/podman/v3/libpod/network/cni/run.go:25.116,29.16 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:33.2,33.25 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:36.2,36.31 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:39.2,39.32 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:42.2,42.46 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:54.2,54.63 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:61.2,61.16 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:65.2,69.15 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:80.2,81.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:85.2,86.46 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:123.2,123.21 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:29.16,31.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:33.25,35.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:36.31,38.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:39.32,41.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:42.46,44.21 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:47.3,48.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:44.21,46.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:48.17,50.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:54.63,56.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:59.3,59.13 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:56.17,58.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:61.16,63.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:69.15,70.20 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:70.20,71.38 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:71.38,73.19 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:73.19,75.6 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:81.16,83.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:86.46,92.91 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:101.3,106.20 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:110.3,112.20 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:115.3,118.20 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:121.3,121.25 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:92.91,96.21 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:96.21,98.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:106.20,108.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:112.20,114.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:118.20,120.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:128.78,131.55 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:138.2,142.35 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:172.2,173.20 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:131.55,133.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:136.3,136.40 1 0 
+github.com/containers/podman/v3/libpod/network/cni/run.go:133.16,135.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:142.35,143.26 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:147.3,147.49 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:150.3,152.9 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:143.26,145.12 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:147.49,149.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:152.9,158.4 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:158.9,160.18 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:163.4,169.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:160.18,162.5 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:177.86,178.33 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:181.1,182.39 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:190.2,190.63 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:193.2,193.12 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:178.33,180.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:182.39,183.47 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:188.3,188.118 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:183.47,184.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:184.29,185.19 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:190.63,192.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:196.141,213.68 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:220.2,220.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:224.2,224.30 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:235.2,235.27 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:242.2,242.20 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:246.2,246.11 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:213.68,214.66 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:214.66,216.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:220.29,222.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:224.30,228.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:228.8,228.36 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:228.36,232.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:235.27,239.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:242.20,244.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:250.90,254.16 4 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:257.2,257.43 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:254.16,256.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:260.90,265.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:269.2,270.46 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:291.2,291.30 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:265.16,267.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:270.46,274.17 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:286.3,287.17 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:274.17,276.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:276.9,279.22 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:283.4,283.32 1 0 
+github.com/containers/podman/v3/libpod/network/cni/run.go:279.22,281.13 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:287.17,289.4 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:294.149,299.16 3 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:305.2,306.16 2 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:309.2,309.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:299.16,301.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:301.8,301.29 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:301.29,303.3 1 0 +github.com/containers/podman/v3/libpod/network/cni/run.go:306.16,308.3 1 0 diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go new file mode 100644 index 00000000000..5574b2b1c5b --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -0,0 +1,397 @@ +// +build linux + +package cni + +import ( + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/containernetworking/cni/libcni" + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) { + network := types.Network{ + Name: conf.Name, + ID: getNetworkIDFromName(conf.Name), + Labels: map[string]string{}, + Options: map[string]string{}, + IPAMOptions: map[string]string{}, + } + + cniJSON := make(map[string]interface{}) + err := json.Unmarshal(conf.Bytes, &cniJSON) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal network config %s", conf.Name) + } + if args, ok := cniJSON["args"]; ok { + if key, ok := args.(map[string]interface{}); ok { + // read network labels and options from the conf file + network.Labels = getNetworkArgsFromConfList(key, podmanLabelKey) + network.Options = getNetworkArgsFromConfList(key, podmanOptionsKey) + } + } + + f, err := os.Stat(confPath) + if err != nil { + return nil, err + } + stat := f.Sys().(*syscall.Stat_t) + network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)) + + firstPlugin := conf.Plugins[0] + network.Driver = firstPlugin.Network.Type + + switch firstPlugin.Network.Type { + case types.BridgeNetworkDriver: + var bridge hostLocalBridge + err := json.Unmarshal(firstPlugin.Bytes, &bridge) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal the bridge plugin config in %s", confPath) + } + network.NetworkInterface = bridge.BrName + + // if isGateway is false we have an internal network + if !bridge.IsGW { + network.Internal = true + } + + // set network options + if bridge.MTU != 0 { + network.Options["mtu"] = strconv.Itoa(bridge.MTU) + } + if bridge.Vlan != 0 { + network.Options["vlan"] = strconv.Itoa(bridge.Vlan) + } + + err = convertIPAMConfToNetwork(&network, &bridge.IPAM, confPath) + if err != nil { + return nil, err + } + + case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: + var vlan VLANConfig + err := json.Unmarshal(firstPlugin.Bytes, &vlan) + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal the macvlan plugin config in %s", confPath) + } + network.NetworkInterface = vlan.Master + + // 
set network options + if vlan.MTU != 0 { + network.Options["mtu"] = strconv.Itoa(vlan.MTU) + } + + if vlan.Mode != "" { + network.Options["mode"] = vlan.Mode + } + + err = convertIPAMConfToNetwork(&network, &vlan.IPAM, confPath) + if err != nil { + return nil, err + } + + default: + // A warning would be good but users would get this warning every time so keep this at info level. + logrus.Infof("Unsupported CNI config type %s in %s, this network can still be used but inspect or list cannot show all information", + firstPlugin.Network.Type, confPath) + } + + // check if the dnsname plugin is configured + network.DNSEnabled = findPluginByName(conf.Plugins, "dnsname") + + return &network, nil +} + +func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool { + for _, plugin := range plugins { + if plugin.Network.Type == name { + return true + } + } + return false +} + +// convertIPAMConfToNetwork converts A cni IPAMConfig to libpod network subnets. +// It returns an array of subnets and an extra bool if dhcp is configured. +func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath string) error { + if ipam.PluginType == types.DHCPIPAMDriver { + network.IPAMOptions["driver"] = types.DHCPIPAMDriver + return nil + } + + if ipam.PluginType != types.HostLocalIPAMDriver { + return errors.Errorf("unsupported ipam plugin %s in %s", ipam.PluginType, confPath) + } + + network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + for _, r := range ipam.Ranges { + for _, ipam := range r { + s := types.Subnet{} + + // Do not use types.ParseCIDR() because we want the ip to be + // the network address and not a random ip in the sub. + _, sub, err := net.ParseCIDR(ipam.Subnet) + if err != nil { + return err + } + s.Subnet = types.IPNet{IPNet: *sub} + + // gateway + var gateway net.IP + if ipam.Gateway != "" { + gateway = net.ParseIP(ipam.Gateway) + if gateway == nil { + return errors.Errorf("failed to parse gateway ip %s", ipam.Gateway) + } + // convert to 4 byte if ipv4 + util.NormalizeIP(&gateway) + } else if !network.Internal { + // only add a gateway address if the network is not internal + gateway, err = util.FirstIPInSubnet(sub) + if err != nil { + return errors.Errorf("failed to get first ip in subnet %s", sub.String()) + } + } + s.Gateway = gateway + + var rangeStart net.IP + var rangeEnd net.IP + if ipam.RangeStart != "" { + rangeStart = net.ParseIP(ipam.RangeStart) + if rangeStart == nil { + return errors.Errorf("failed to parse range start ip %s", ipam.RangeStart) + } + } + if ipam.RangeEnd != "" { + rangeEnd = net.ParseIP(ipam.RangeEnd) + if rangeEnd == nil { + return errors.Errorf("failed to parse range end ip %s", ipam.RangeEnd) + } + } + if rangeStart != nil || rangeEnd != nil { + s.LeaseRange = &types.LeaseRange{} + s.LeaseRange.StartIP = rangeStart + s.LeaseRange.EndIP = rangeEnd + } + if util.IsIPv6(s.Subnet.IP) { + network.IPv6Enabled = true + } + network.Subnets = append(network.Subnets, s) + } + } + return nil +} + +// getNetworkArgsFromConfList returns the map of args in a conflist, argType should be labels or options +func getNetworkArgsFromConfList(args map[string]interface{}, argType string) map[string]string { + if args, ok := args[argType]; ok { + if labels, ok := args.(map[string]interface{}); ok { + result := make(map[string]string, len(labels)) + for k, v := range labels { + if v, ok := v.(string); ok { + result[k] = v + } + } + return result + } + } + return map[string]string{} +} + +// createCNIConfigListFromNetwork will create a cni config 
file from the given network. +// It returns the cni config and the path to the file where the config was written. +// Set writeToDisk to false to only add this network into memory. +func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writeToDisk bool) (*libcni.NetworkConfigList, string, error) { + var ( + routes []ipamRoute + ipamRanges [][]ipamLocalHostRangeConf + ipamConf ipamConfig + err error + ) + if len(network.Subnets) > 0 { + defIpv4Route := false + defIpv6Route := false + for _, subnet := range network.Subnets { + ipam := newIPAMLocalHostRange(subnet.Subnet, subnet.LeaseRange, subnet.Gateway) + ipamRanges = append(ipamRanges, []ipamLocalHostRangeConf{*ipam}) + + // only add default route for not internal networks + if !network.Internal { + ipv6 := util.IsIPv6(subnet.Subnet.IP) + if !ipv6 && defIpv4Route { + continue + } + if ipv6 && defIpv6Route { + continue + } + + if ipv6 { + defIpv6Route = true + } else { + defIpv4Route = true + } + route, err := newIPAMDefaultRoute(ipv6) + if err != nil { + return nil, "", err + } + routes = append(routes, route) + } + } + ipamConf = newIPAMHostLocalConf(routes, ipamRanges) + } else { + ipamConf = ipamConfig{PluginType: "dhcp"} + } + + vlan := 0 + mtu := 0 + vlanPluginMode := "" + for k, v := range network.Options { + switch k { + case "mtu": + mtu, err = internalutil.ParseMTU(v) + if err != nil { + return nil, "", err + } + + case "vlan": + vlan, err = internalutil.ParseVlan(v) + if err != nil { + return nil, "", err + } + + case "mode": + switch network.Driver { + case types.MacVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) { + return nil, "", errors.Errorf("unknown macvlan mode %q", v) + } + case types.IPVLANNetworkDriver: + if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) { + return nil, "", errors.Errorf("unknown ipvlan mode %q", v) + } + default: + return nil, "", errors.Errorf("cannot set option \"mode\" with driver %q", network.Driver) + } + vlanPluginMode = v + + default: + return nil, "", errors.Errorf("unsupported network option %s", k) + } + } + + isGateway := true + ipMasq := true + if network.Internal { + isGateway = false + ipMasq = false + } + // create CNI plugin configuration + // explicitly use CNI version 0.4.0 here, to use v1.0.0 at least containernetwork-plugins-1.0.1 has to be installed + // the dnsname plugin also needs to be updated for 1.0.0 + // TODO change to 1.0.0 when most distros support it + ncList := newNcList(network.Name, "0.4.0", network.Labels, network.Options) + var plugins []interface{} + + switch network.Driver { + case types.BridgeNetworkDriver: + bridge := newHostLocalBridge(network.NetworkInterface, isGateway, ipMasq, mtu, vlan, &ipamConf) + plugins = append(plugins, bridge, newPortMapPlugin(), newFirewallPlugin(), newTuningPlugin()) + // if we find the dnsname plugin we add configuration for it + if hasDNSNamePlugin(n.cniPluginDirs) && network.DNSEnabled { + // Note: in the future we might like to allow for dynamic domain names + plugins = append(plugins, newDNSNamePlugin(defaultPodmanDomainName)) + } + + case types.MacVLANNetworkDriver: + plugins = append(plugins, newVLANPlugin(types.MacVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, &ipamConf)) + + case types.IPVLANNetworkDriver: + plugins = append(plugins, newVLANPlugin(types.IPVLANNetworkDriver, network.NetworkInterface, vlanPluginMode, mtu, &ipamConf)) + + default: + return nil, "", errors.Errorf("driver %q is not supported by cni", network.Driver) + } + ncList["plugins"] 
= plugins + b, err := json.MarshalIndent(ncList, "", " ") + if err != nil { + return nil, "", err + } + cniPathName := "" + if writeToDisk { + cniPathName = filepath.Join(n.cniConfigDir, network.Name+".conflist") + err = ioutil.WriteFile(cniPathName, b, 0644) + if err != nil { + return nil, "", err + } + f, err := os.Stat(cniPathName) + if err != nil { + return nil, "", err + } + stat := f.Sys().(*syscall.Stat_t) + network.Created = time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)) + } else { + network.Created = time.Now() + } + config, err := libcni.ConfListFromBytes(b) + if err != nil { + return nil, "", err + } + return config, cniPathName, nil +} + +func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry, error) { + cniPorts := make([]cniPortMapEntry, 0, len(ports)) + for _, port := range ports { + if port.Protocol == "" { + return nil, errors.New("port protocol should not be empty") + } + protocols := strings.Split(port.Protocol, ",") + + for _, protocol := range protocols { + if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) { + return nil, errors.Errorf("unknown port protocol %s", protocol) + } + cniPort := cniPortMapEntry{ + HostPort: int(port.HostPort), + ContainerPort: int(port.ContainerPort), + HostIP: port.HostIP, + Protocol: protocol, + } + cniPorts = append(cniPorts, cniPort) + for i := 1; i < int(port.Range); i++ { + cniPort := cniPortMapEntry{ + HostPort: int(port.HostPort) + i, + ContainerPort: int(port.ContainerPort) + i, + HostIP: port.HostIP, + Protocol: protocol, + } + cniPorts = append(cniPorts, cniPort) + } + } + } + return cniPorts, nil +} + +func removeMachinePlugin(conf *libcni.NetworkConfigList) *libcni.NetworkConfigList { + plugins := make([]*libcni.NetworkConfig, 0, len(conf.Plugins)) + for _, net := range conf.Plugins { + if net.Network.Type != "podman-machine" { + plugins = append(plugins, net) + } + } + conf.Plugins = plugins + return conf +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go new file mode 100644 index 00000000000..c66e7ef5d7a --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go @@ -0,0 +1,110 @@ +// Copyright 2016 CNI authors +// Copyright 2021 Podman authors +// +// This code has been originally copied from github.com/containernetworking/cni +// but has been changed to better fit the Podman use case. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package cni + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/version" + "github.com/containers/storage/pkg/unshare" +) + +type cniExec struct { + version.PluginDecoder +} + +type cniPluginError struct { + plugin string + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +// Error returns a nicely formatted error message for the cni plugin errors. +func (e *cniPluginError) Error() string { + err := fmt.Sprintf("cni plugin %s failed", e.plugin) + if e.Msg != "" { + err = fmt.Sprintf("%s: %s", err, e.Msg) + } else if e.Code > 0 { + err = fmt.Sprintf("%s with error code %d", err, e.Code) + } + if e.Details != "" { + err = fmt.Sprintf("%s: %s", err, e.Details) + } + return err +} + +// ExecPlugin execute the cni plugin. Returns the stdout of the plugin or an error. +func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = stderr + + // The dnsname plugin tries to use XDG_RUNTIME_DIR to store files. + // podman run will have XDG_RUNTIME_DIR set and thus the cni plugin can use + // it. The problem is that XDG_RUNTIME_DIR is unset for the conmon process + // for rootful users. This causes issues since the cleanup process is spawned + // by conmon and thus not have XDG_RUNTIME_DIR set to same value as podman run. + // Because of it dnsname will not find the config files and cannot correctly cleanup. + // To fix this we should also unset XDG_RUNTIME_DIR for the cni plugins as rootful. + if !unshare.IsRootless() { + c.Env = append(c.Env, "XDG_RUNTIME_DIR=") + } + + err := c.Run() + if err != nil { + return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes()) + } + return stdout.Bytes(), nil +} + +// annotatePluginError parses the common cni plugin error json. +func annotatePluginError(err error, plugin string, stdout, stderr []byte) error { + pluginName := filepath.Base(plugin) + emsg := cniPluginError{ + plugin: pluginName, + } + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = err.Error() + } else { + emsg.Msg = string(stderr) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("failed to unmarshal error message %q: %v", string(stdout), perr) + } + return &emsg +} + +// FindInPath finds the plugin in the given paths. 
+func (e *cniExec) FindInPath(plugin string, paths []string) (string, error) { + return invoke.FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go new file mode 100644 index 00000000000..fbfcd49ad37 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -0,0 +1,281 @@ +// +build linux + +package cni + +import ( + "net" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/types" +) + +const ( + defaultIPv4Route = "0.0.0.0/0" + defaultIPv6Route = "::/0" + // defaultPodmanDomainName is used for the dnsname plugin to define + // a localized domain name for a created network + defaultPodmanDomainName = "dns.podman" + + // cniDeviceName is the default name for a new bridge, it should be suffixed with an integer + cniDeviceName = "cni-podman" + + // podmanLabelKey key used to store the podman network label in a cni config + podmanLabelKey = "podman_labels" + + // podmanOptionsKey key used to store the podman network options in a cni config + podmanOptionsKey = "podman_options" +) + +// cniPortMapEntry struct is used by the portmap plugin +// https://github.com/containernetworking/plugins/blob/649e0181fe7b3a61e708f3e4249a798f57f25cc5/plugins/meta/portmap/main.go#L43-L50 +type cniPortMapEntry struct { + HostPort int `json:"hostPort"` + ContainerPort int `json:"containerPort"` + Protocol string `json:"protocol"` + HostIP string `json:"hostIP,omitempty"` +} + +// hostLocalBridge describes a configuration for a bridge plugin +// https://github.com/containernetworking/plugins/tree/master/plugins/main/bridge#network-configuration-reference +type hostLocalBridge struct { + PluginType string `json:"type"` + BrName string `json:"bridge,omitempty"` + IsGW bool `json:"isGateway"` + IsDefaultGW bool `json:"isDefaultGateway,omitempty"` + ForceAddress bool `json:"forceAddress,omitempty"` + IPMasq bool `json:"ipMasq,omitempty"` + MTU int `json:"mtu,omitempty"` + HairpinMode bool `json:"hairpinMode,omitempty"` + PromiscMode bool `json:"promiscMode,omitempty"` + Vlan int `json:"vlan,omitempty"` + IPAM ipamConfig `json:"ipam"` + Capabilities map[string]bool `json:"capabilities,omitempty"` +} + +// ipamConfig describes an IPAM configuration +// https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local#network-configuration-reference +type ipamConfig struct { + PluginType string `json:"type"` + Routes []ipamRoute `json:"routes,omitempty"` + ResolveConf string `json:"resolveConf,omitempty"` + DataDir string `json:"dataDir,omitempty"` + Ranges [][]ipamLocalHostRangeConf `json:"ranges,omitempty"` +} + +// ipamLocalHostRangeConf describes the new style IPAM ranges +type ipamLocalHostRangeConf struct { + Subnet string `json:"subnet"` + RangeStart string `json:"rangeStart,omitempty"` + RangeEnd string `json:"rangeEnd,omitempty"` + Gateway string `json:"gateway,omitempty"` +} + +// ipamRoute describes a route in an ipam config +type ipamRoute struct { + Dest string `json:"dst"` +} + +// portMapConfig describes the default portmapping config +type portMapConfig struct { + PluginType string `json:"type"` + Capabilities map[string]bool `json:"capabilities"` +} + +// VLANConfig describes the macvlan config +type VLANConfig struct { + PluginType string `json:"type"` + Master string `json:"master"` + IPAM ipamConfig `json:"ipam"` + MTU int `json:"mtu,omitempty"` + Mode string `json:"mode,omitempty"` + Capabilities map[string]bool 
`json:"capabilities,omitempty"` +} + +// firewallConfig describes the firewall plugin +type firewallConfig struct { + PluginType string `json:"type"` + Backend string `json:"backend"` +} + +// tuningConfig describes the tuning plugin +type tuningConfig struct { + PluginType string `json:"type"` +} + +// dnsNameConfig describes the dns container name resolution plugin config +type dnsNameConfig struct { + PluginType string `json:"type"` + DomainName string `json:"domainName"` + Capabilities map[string]bool `json:"capabilities"` +} + +// ncList describes a generic map +type ncList map[string]interface{} + +// newNcList creates a generic map of values with string +// keys and adds in version and network name +func newNcList(name, version string, labels, options map[string]string) ncList { + n := ncList{} + n["cniVersion"] = version + n["name"] = name + args := map[string]map[string]string{} + if len(labels) > 0 { + args[podmanLabelKey] = labels + } + if len(options) > 0 { + args[podmanOptionsKey] = options + } + if len(args) > 0 { + n["args"] = args + } + return n +} + +// newHostLocalBridge creates a new LocalBridge for host-local +func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipamConf *ipamConfig) *hostLocalBridge { + caps := make(map[string]bool) + caps["ips"] = true + bridge := hostLocalBridge{ + PluginType: "bridge", + BrName: name, + IsGW: isGateWay, + IPMasq: ipMasq, + MTU: mtu, + HairpinMode: true, + Vlan: vlan, + IPAM: *ipamConf, + } + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipamConf.PluginType == types.HostLocalIPAMDriver { + bridge.Capabilities = caps + } + return &bridge +} + +// newIPAMHostLocalConf creates a new IPAMHostLocal configuration +func newIPAMHostLocalConf(routes []ipamRoute, ipamRanges [][]ipamLocalHostRangeConf) ipamConfig { + ipamConf := ipamConfig{ + PluginType: "host-local", + Routes: routes, + } + + ipamConf.Ranges = ipamRanges + return ipamConf +} + +// newIPAMLocalHostRange create a new IPAM range +func newIPAMLocalHostRange(subnet types.IPNet, leaseRange *types.LeaseRange, gw net.IP) *ipamLocalHostRangeConf { + hostRange := &ipamLocalHostRangeConf{ + Subnet: subnet.String(), + } + + // a user provided a range, we add it here + if leaseRange != nil { + if leaseRange.StartIP != nil { + hostRange.RangeStart = leaseRange.StartIP.String() + } + if leaseRange.EndIP != nil { + hostRange.RangeEnd = leaseRange.EndIP.String() + } + } + + if gw != nil { + hostRange.Gateway = gw.String() + } + return hostRange +} + +// newIPAMRoute creates a new IPAM route configuration +// nolint:interfacer +func newIPAMRoute(r *net.IPNet) ipamRoute { + return ipamRoute{Dest: r.String()} +} + +// newIPAMDefaultRoute creates a new IPAMDefault route of +// 0.0.0.0/0 for IPv4 or ::/0 for IPv6 +func newIPAMDefaultRoute(isIPv6 bool) (ipamRoute, error) { + route := defaultIPv4Route + if isIPv6 { + route = defaultIPv6Route + } + _, n, err := net.ParseCIDR(route) + if err != nil { + return ipamRoute{}, err + } + return newIPAMRoute(n), nil +} + +// newPortMapPlugin creates a predefined, default portmapping +// configuration +func newPortMapPlugin() portMapConfig { + caps := make(map[string]bool) + caps["portMappings"] = true + p := portMapConfig{ + PluginType: "portmap", + Capabilities: caps, + } + return p +} + +// newFirewallPlugin creates a generic firewall plugin +func newFirewallPlugin() firewallConfig { + return firewallConfig{ + PluginType: "firewall", + } +} + +// newTuningPlugin creates a generic 
tuning section +func newTuningPlugin() tuningConfig { + return tuningConfig{ + PluginType: "tuning", + } +} + +// newDNSNamePlugin creates the dnsname config with a given +// domainname +func newDNSNamePlugin(domainName string) dnsNameConfig { + caps := make(map[string]bool, 1) + caps["aliases"] = true + return dnsNameConfig{ + PluginType: "dnsname", + DomainName: domainName, + Capabilities: caps, + } +} + +// hasDNSNamePlugin looks to see if the dnsname cni plugin is present +func hasDNSNamePlugin(paths []string) bool { + for _, p := range paths { + if _, err := os.Stat(filepath.Join(p, "dnsname")); err == nil { + return true + } + } + return false +} + +// newVLANPlugin creates a macvlanconfig with a given device name +func newVLANPlugin(pluginType, device, mode string, mtu int, ipam *ipamConfig) VLANConfig { + m := VLANConfig{ + PluginType: pluginType, + IPAM: *ipam, + } + if mtu > 0 { + m.MTU = mtu + } + if len(mode) > 0 { + m.Mode = mode + } + // CNI is supposed to use the default route if a + // parent device is not provided + if len(device) > 0 { + m.Master = device + } + caps := make(map[string]bool) + caps["ips"] = true + // if we use host-local set the ips cap to ensure we can set static ips via runtime config + if ipam.PluginType == types.HostLocalIPAMDriver { + m.Capabilities = caps + } + return m +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go new file mode 100644 index 00000000000..b1f89400cf3 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -0,0 +1,208 @@ +// +build linux + +package cni + +import ( + "net" + "os" + + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// NetworkCreate will take a partial filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +// nolint:gocritic +func (n *cniNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.libpodNet.Name] = network + return *network.libpodNet, nil +} + +// networkCreate will fill out the given network struct and return the new network entry. +// If defaultNet is true it will not validate against used subnets and it will not write the cni config to disk. +func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. 
+ // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host. + var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks) + if err != nil { + return nil, err + } + case types.MacVLANNetworkDriver, types.IPVLANNetworkDriver: + err = createIPMACVLAN(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + err = internalutil.ValidateSubnets(newNetwork, !newNetwork.Internal, usedNetworks) + if err != nil { + return nil, err + } + + // generate the network ID + newNetwork.ID = getNetworkIDFromName(newNetwork.Name) + + // FIXME: Should this be a hard error? + if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) { + logrus.Warnf("dnsname and internal networks are incompatible. dnsname plugin not configured for network %s", newNetwork.Name) + newNetwork.DNSEnabled = false + } + + cniConf, path, err := n.createCNIConfigListFromNetwork(newNetwork, !defaultNet) + if err != nil { + return nil, err + } + return &network{cniNet: cniConf, libpodNet: newNetwork, filename: path}, nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. +func (n *cniNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. + if network.libpodNet.Name == n.defaultNetwork { + return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + } + + // Remove the bridge network interface on the host. + if network.libpodNet.Driver == types.BridgeNetworkDriver { + link, err := netlink.LinkByName(network.libpodNet.NetworkInterface) + if err == nil { + err = netlink.LinkDel(link) + // only log the error, it is not fatal + if err != nil { + logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err) + } + } + } + + file := network.filename + delete(n.networks, network.libpodNet.Name) + + // make sure to not error for ErrNotExist + if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + return nil +} + +// NetworkList will return all known Networks. Optionally you can +// supply a list of filter functions. Only if a network matches all +// functions it is returned. 
+func (n *cniNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + networks := make([]types.Network, 0, len(n.networks)) +outer: + for _, net := range n.networks { + for _, filter := range filters { + // All filters have to match, if one does not match we can skip to the next network. + if !filter(*net.libpodNet) { + continue outer + } + } + networks = append(networks, *net.libpodNet) + } + return networks, nil +} + +// NetworkInspect will return the Network with the given name or ID. +func (n *cniNetwork) NetworkInspect(nameOrID string) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return types.Network{}, err + } + return *network.libpodNet, nil +} + +func createIPMACVLAN(network *types.Network) error { + if network.Internal { + return errors.New("internal is not supported with macvlan") + } + if network.NetworkInterface != "" { + interfaceNames, err := internalutil.GetLiveNetworkNames() + if err != nil { + return err + } + if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { + return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + } + } + if len(network.Subnets) == 0 { + network.IPAMOptions["driver"] = types.DHCPIPAMDriver + } else { + network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go new file mode 100644 index 00000000000..95822723525 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -0,0 +1,275 @@ +// +build linux + +package cni + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containernetworking/cni/libcni" + "github.com/containers/common/libnetwork/types" + "github.com/containers/storage/pkg/lockfile" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type cniNetwork struct { + // cniConfigDir is directory where the cni config files are stored. + cniConfigDir string + // cniPluginDirs is a list of directories where cni should look for the plugins. + cniPluginDirs []string + + cniConf *libcni.CNIConfig + + // defaultNetwork is the name for the default network. + defaultNetwork string + // defaultSubnet is the default subnet for the default network. + defaultSubnet types.IPNet + + // isMachine describes whenever podman runs in a podman machine environment. + isMachine bool + + // lock is a internal lock for critical operations + lock lockfile.Locker + + // modTime is the timestamp when the config dir was modified + modTime time.Time + + // networks is a map with loaded networks, the key is the network name + networks map[string]*network +} + +type network struct { + // filename is the full path to the cni config file on disk + filename string + libpodNet *types.Network + cniNet *libcni.NetworkConfigList +} + +type InitConfig struct { + // CNIConfigDir is directory where the cni config files are stored. + CNIConfigDir string + // CNIPluginDirs is a list of directories where cni should look for the plugins. + CNIPluginDirs []string + + // DefaultNetwork is the name for the default network. 
+ DefaultNetwork string + // DefaultSubnet is the default subnet for the default network. + DefaultSubnet string + + // IsMachine describes whenever podman runs in a podman machine environment. + IsMachine bool +} + +// NewCNINetworkInterface creates the ContainerNetwork interface for the CNI backend. +// Note: The networks are not loaded from disk until a method is called. +func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { + // TODO: consider using a shared memory lock + lock, err := lockfile.GetLockfile(filepath.Join(conf.CNIConfigDir, "cni.lock")) + if err != nil { + return nil, err + } + + defaultNetworkName := conf.DefaultNetwork + if defaultNetworkName == "" { + defaultNetworkName = types.DefaultNetworkName + } + + defaultSubnet := conf.DefaultSubnet + if defaultSubnet == "" { + defaultSubnet = types.DefaultSubnet + } + defaultNet, err := types.ParseCIDR(defaultSubnet) + if err != nil { + return nil, errors.Wrap(err, "failed to parse default subnet") + } + + cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{}) + n := &cniNetwork{ + cniConfigDir: conf.CNIConfigDir, + cniPluginDirs: conf.CNIPluginDirs, + cniConf: cni, + defaultNetwork: defaultNetworkName, + defaultSubnet: defaultNet, + isMachine: conf.IsMachine, + lock: lock, + } + + return n, nil +} + +// Drivers will return the list of supported network drivers +// for this interface. +func (n *cniNetwork) Drivers() []string { + return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver, types.IPVLANNetworkDriver} +} + +// DefaultNetworkName will return the default cni network name. +func (n *cniNetwork) DefaultNetworkName() string { + return n.defaultNetwork +} + +func (n *cniNetwork) loadNetworks() error { + // check the mod time of the config dir + f, err := os.Stat(n.cniConfigDir) + if err != nil { + return err + } + modTime := f.ModTime() + + // skip loading networks if they are already loaded and + // if the config dir was not modified since the last call + if n.networks != nil && modTime.Equal(n.modTime) { + return nil + } + // make sure the remove all networks before we reload them + n.networks = nil + n.modTime = modTime + + // FIXME: do we have to support other file types as well, e.g. .conf? 
+ files, err := libcni.ConfFiles(n.cniConfigDir, []string{".conflist"}) + if err != nil { + return err + } + networks := make(map[string]*network, len(files)) + for _, file := range files { + conf, err := libcni.ConfListFromFile(file) + if err != nil { + // do not log ENOENT errors + if !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Error loading CNI config file %s: %v", file, err) + } + continue + } + + if !types.NameRegex.MatchString(conf.Name) { + logrus.Warnf("CNI config list %s has invalid name, skipping: %v", file, types.RegexError) + continue + } + + // podman < v4.0 used the podman-machine cni plugin for podman machine port forwarding + // since this is now build into podman we no longer use the plugin + // old configs may still contain it so we just remove it here + if n.isMachine { + conf = removeMachinePlugin(conf) + } + + if _, err := n.cniConf.ValidateNetworkList(context.Background(), conf); err != nil { + logrus.Warnf("Error validating CNI config file %s: %v", file, err) + continue + } + + if val, ok := networks[conf.Name]; ok { + logrus.Warnf("CNI config list %s has the same network name as %s, skipping", file, val.filename) + continue + } + + net, err := createNetworkFromCNIConfigList(conf, file) + if err != nil { + logrus.Errorf("CNI config list %s could not be converted to a libpod config, skipping: %v", file, err) + continue + } + logrus.Debugf("Successfully loaded network %s: %v", net.Name, net) + networkInfo := network{ + filename: file, + cniNet: conf, + libpodNet: net, + } + networks[net.Name] = &networkInfo + } + + // create the default network in memory if it did not exists on disk + if networks[n.defaultNetwork] == nil { + networkInfo, err := n.createDefaultNetwork() + if err != nil { + return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork) + } + networks[n.defaultNetwork] = networkInfo + } + + logrus.Debugf("Successfully loaded %d networks", len(networks)) + n.networks = networks + return nil +} + +func (n *cniNetwork) createDefaultNetwork() (*network, error) { + net := types.Network{ + Name: n.defaultNetwork, + NetworkInterface: "cni-podman0", + Driver: types.BridgeNetworkDriver, + Subnets: []types.Subnet{ + {Subnet: n.defaultSubnet}, + }, + } + return n.networkCreate(&net, true) +} + +// getNetwork will lookup a network by name or ID. It returns an +// error when no network was found or when more than one network +// with the given (partial) ID exists. +// getNetwork will read from the networks map, therefore the caller +// must ensure that n.lock is locked before using it. +func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) { + // fast path check the map key, this will only work for names + if val, ok := n.networks[nameOrID]; ok { + return val, nil + } + // If there was no match we might got a full or partial ID. + var net *network + for _, val := range n.networks { + // This should not happen because we already looked up the map by name but check anyway. + if val.libpodNet.Name == nameOrID { + return val, nil + } + + if strings.HasPrefix(val.libpodNet.ID, nameOrID) { + if net != nil { + return nil, errors.Errorf("more than one result for network ID %s", nameOrID) + } + net = val + } + } + if net != nil { + return net, nil + } + return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) +} + +// getNetworkIDFromName creates a network ID from the name. It is just the +// sha256 hash so it is not safe but it should be safe enough for our use case. 
+func getNetworkIDFromName(name string) string { + hash := sha256.Sum256([]byte(name)) + return hex.EncodeToString(hash[:]) +} + +// Implement the NetUtil interface for easy code sharing with other network interfaces. + +// ForEach call the given function for each network +func (n *cniNetwork) ForEach(run func(types.Network)) { + for _, val := range n.networks { + run(*val.libpodNet) + } +} + +// Len return the number of networks +func (n *cniNetwork) Len() int { + return len(n.networks) +} + +// DefaultInterfaceName return the default cni bridge name, must be suffixed with a number. +func (n *cniNetwork) DefaultInterfaceName() string { + return cniDeviceName +} + +func (n *cniNetwork) Network(nameOrID string) (*types.Network, error) { + network, err := n.getNetwork(nameOrID) + if err != nil { + return nil, err + } + return network.libpodNet, err +} diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go new file mode 100644 index 00000000000..af05d9d9d5c --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/cni/run.go @@ -0,0 +1,274 @@ +// +build linux + +package cni + +import ( + "context" + "net" + "os" + "strings" + + "github.com/containernetworking/cni/libcni" + cnitypes "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// Setup will setup the container network namespace. It returns +// a map of StatusBlocks, the key is the network name. 
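// A rough caller-side sketch, assuming cn is the types.ContainerNetwork
// returned by NewCNINetworkInterface and every literal value is illustrative:
//
//	opts := types.SetupOptions{
//		NetworkOptions: types.NetworkOptions{
//			ContainerID:   "1a2b3c",
//			ContainerName: "web",
//			Networks: map[string]types.PerNetworkOptions{
//				"podman": {InterfaceName: "eth0"},
//			},
//		},
//	}
//	statuses, err := cn.Setup("/run/netns/example", opts)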
+func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + err = util.ValidateSetupOptions(n, namespacePath, options) + if err != nil { + return nil, err + } + + // set the loopback adapter up in the container netns + err = ns.WithNetNSPath(namespacePath, func(_ ns.NetNS) error { + link, err := netlink.LinkByName("lo") + if err == nil { + err = netlink.LinkSetUp(link) + } + return err + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to set the loopback adapter up") + } + + var retErr error + teardownOpts := options + teardownOpts.Networks = map[string]types.PerNetworkOptions{} + // make sure to teardown the already connected networks on error + defer func() { + if retErr != nil { + if len(teardownOpts.Networks) > 0 { + err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts)) + if err != nil { + logrus.Warn(err) + } + } + } + }() + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return nil, err + } + + results := make(map[string]types.StatusBlock, len(options.Networks)) + for name, netOpts := range options.Networks { + netOpts := netOpts + network := n.networks[name] + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + // If we have more than one static ip we need parse the ips via runtime config, + // make sure to add the ips capability to the first plugin otherwise it doesn't get the ips + if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] { + caps := make(map[string]interface{}) + caps["capabilities"] = map[string]bool{"ips": true} + network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps) + if retErr != nil { + return nil, retErr + } + } + + var res cnitypes.Result + res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt) + // Add this network to teardown opts since it is now connected. + // Also add this if an errors was returned since we want to call teardown on this regardless. 
+ teardownOpts.Networks[name] = netOpts + if retErr != nil { + return nil, retErr + } + + logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res) + var status types.StatusBlock + status, retErr = CNIResultToStatus(res) + if retErr != nil { + return nil, retErr + } + results[name] = status + } + return results, nil +} + +// CNIResultToStatus convert the cni result to status block +// nolint:golint +func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) { + result := types.StatusBlock{} + cniResult, err := types040.GetResult(res) + if err != nil { + return result, err + } + nameservers := make([]net.IP, 0, len(cniResult.DNS.Nameservers)) + for _, nameserver := range cniResult.DNS.Nameservers { + ip := net.ParseIP(nameserver) + if ip == nil { + return result, errors.Errorf("failed to parse cni nameserver ip %s", nameserver) + } + nameservers = append(nameservers, ip) + } + result.DNSServerIPs = nameservers + result.DNSSearchDomains = cniResult.DNS.Search + + interfaces := make(map[string]types.NetInterface) + for _, ip := range cniResult.IPs { + if ip.Interface == nil { + // we do no expect ips without an interface + continue + } + if len(cniResult.Interfaces) <= *ip.Interface { + return result, errors.Errorf("invalid cni result, interface index %d out of range", *ip.Interface) + } + cniInt := cniResult.Interfaces[*ip.Interface] + netInt, ok := interfaces[cniInt.Name] + if ok { + netInt.Subnets = append(netInt.Subnets, types.NetAddress{ + IPNet: types.IPNet{IPNet: ip.Address}, + Gateway: ip.Gateway, + }) + interfaces[cniInt.Name] = netInt + } else { + mac, err := net.ParseMAC(cniInt.Mac) + if err != nil { + return result, err + } + interfaces[cniInt.Name] = types.NetInterface{ + MacAddress: types.HardwareAddr(mac), + Subnets: []types.NetAddress{{ + IPNet: types.IPNet{IPNet: ip.Address}, + Gateway: ip.Gateway, + }}, + } + } + } + result.Interfaces = interfaces + return result, nil +} + +func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPortMapEntry, opts *types.PerNetworkOptions) *libcni.RuntimeConf { + rt := &libcni.RuntimeConf{ + ContainerID: conID, + NetNS: netns, + IfName: opts.InterfaceName, + Args: [][2]string{ + {"IgnoreUnknown", "1"}, + // Do not set the K8S env vars, see https://github.com/containers/podman/issues/12083. + // Only K8S_POD_NAME is used by dnsname to get the container name. + {"K8S_POD_NAME", conName}, + }, + CapabilityArgs: map[string]interface{}{}, + } + + // Propagate environment CNI_ARGS + for _, kvpairs := range strings.Split(os.Getenv("CNI_ARGS"), ";") { + if keyval := strings.SplitN(kvpairs, "=", 2); len(keyval) == 2 { + rt.Args = append(rt.Args, [2]string{keyval[0], keyval[1]}) + } + } + + // Add mac address to cni args + if len(opts.StaticMAC) > 0 { + rt.Args = append(rt.Args, [2]string{"MAC", opts.StaticMAC.String()}) + } + + if len(opts.StaticIPs) == 1 { + // Add a single IP to the args field. CNI plugins < 1.0.0 + // do not support multiple ips via capability args. + rt.Args = append(rt.Args, [2]string{"IP", opts.StaticIPs[0].String()}) + } else if len(opts.StaticIPs) > 1 { + // Set the static ips in the capability args + // to support more than one static ip per network. + rt.CapabilityArgs["ips"] = opts.StaticIPs + } + + // Set network aliases for the dnsname plugin. 
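	// For example, aliases ["db", "db1"] requested on network "mynet" would end
	// up in the runtime config as (values illustrative):
	//
	//	rt.CapabilityArgs["aliases"] = map[string][]string{"mynet": {"db", "db1"}}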
+ if len(opts.Aliases) > 0 { + rt.CapabilityArgs["aliases"] = map[string][]string{ + networkName: opts.Aliases, + } + } + + // Set PortMappings in Capabilities + if len(ports) > 0 { + rt.CapabilityArgs["portMappings"] = ports + } + + return rt +} + +// Teardown will teardown the container network namespace. +func (n *cniNetwork) Teardown(namespacePath string, options types.TeardownOptions) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + return n.teardown(namespacePath, options) +} + +func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOptions) error { + // Note: An empty namespacePath is allowed because some plugins + // still need teardown, for example ipam should remove used ip allocations. + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return err + } + + var multiErr *multierror.Error + for name, netOpts := range options.Networks { + netOpts := netOpts + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) + if err == nil { + rt = newRt + } else { + logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) + network := n.networks[name] + if network == nil { + multiErr = multierror.Append(multiErr, errors.Wrapf(types.ErrNoSuchNetwork, "network %s", name)) + continue + } + cniConfList = network.cniNet + } + + err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + } + return multiErr.ErrorOrNil() +} + +func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.RuntimeConf) (*libcni.NetworkConfigList, *libcni.RuntimeConf, error) { + cniConfList := &libcni.NetworkConfigList{ + Name: name, + } + confBytes, rt, err := cniConf.GetNetworkListCachedConfig(cniConfList, rt) + if err != nil { + return nil, nil, err + } else if confBytes == nil { + return nil, nil, errors.Errorf("network %s not found in CNI cache", name) + } + + cniConfList, err = libcni.ConfListFromBytes(confBytes) + if err != nil { + return nil, nil, err + } + return cniConfList, rt, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go new file mode 100644 index 00000000000..27ad0a4fbbc --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go @@ -0,0 +1,68 @@ +package util + +import ( + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + pkgutil "github.com/containers/common/pkg/util" + "github.com/pkg/errors" +) + +func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet) error { + if network.NetworkInterface != "" { + bridges := GetBridgeInterfaceNames(n) + if pkgutil.StringInSlice(network.NetworkInterface, bridges) { + return errors.Errorf("bridge name %s already in use", network.NetworkInterface) + } + if !types.NameRegex.MatchString(network.NetworkInterface) { + return errors.Wrapf(types.RegexError, "bridge name %s invalid", network.NetworkInterface) + } + } else { + var err error + network.NetworkInterface, err = GetFreeDeviceName(n) + if err != nil { + return err + } + } + + if network.IPAMOptions["driver"] != types.DHCPIPAMDriver { + if len(network.Subnets) == 0 { + freeSubnet, err := 
GetFreeIPv4NetworkSubnet(usedNetworks) + if err != nil { + return err + } + network.Subnets = append(network.Subnets, *freeSubnet) + } + // ipv6 enabled means dual stack, check if we already have + // a ipv4 or ipv6 subnet and add one if not. + if network.IPv6Enabled { + ipv4 := false + ipv6 := false + for _, subnet := range network.Subnets { + if util.IsIPv6(subnet.Subnet.IP) { + ipv6 = true + } + if util.IsIPv4(subnet.Subnet.IP) { + ipv4 = true + } + } + if !ipv4 { + freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks) + if err != nil { + return err + } + network.Subnets = append(network.Subnets, *freeSubnet) + } + if !ipv6 { + freeSubnet, err := GetFreeIPv6NetworkSubnet(usedNetworks) + if err != nil { + return err + } + network.Subnets = append(network.Subnets, *freeSubnet) + } + } + network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/create.go b/vendor/github.com/containers/common/libnetwork/internal/util/create.go new file mode 100644 index 00000000000..ccb0f001a51 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/create.go @@ -0,0 +1,41 @@ +package util + +import ( + "github.com/containers/common/libnetwork/types" + "github.com/pkg/errors" +) + +func CommonNetworkCreate(n NetUtil, network *types.Network) error { + if network.Labels == nil { + network.Labels = map[string]string{} + } + if network.Options == nil { + network.Options = map[string]string{} + } + if network.IPAMOptions == nil { + network.IPAMOptions = map[string]string{} + } + + var name string + var err error + // validate the name when given + if network.Name != "" { + if !types.NameRegex.MatchString(network.Name) { + return errors.Wrapf(types.RegexError, "network name %s invalid", network.Name) + } + if _, err := n.Network(network.Name); err == nil { + return errors.Wrapf(types.ErrNetworkExists, "network name %s already used", network.Name) + } + } else { + name, err = GetFreeDeviceName(n) + if err != nil { + return err + } + network.Name = name + // also use the name as interface name when we create a bridge network + if network.Driver == types.BridgeNetworkDriver && network.NetworkInterface == "" { + network.NetworkInterface = name + } + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go new file mode 100644 index 00000000000..650fcb193c9 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go @@ -0,0 +1,19 @@ +package util + +import "github.com/containers/common/libnetwork/types" + +// This is a helper package to allow code sharing between the different +// network interfaces. + +// NetUtil is a helper interface which all network interfaces should implement to allow easy code sharing +type NetUtil interface { + // ForEach eaxecutes the given function for each network + ForEach(func(types.Network)) + // Len returns the number of networks + Len() int + // DefaultInterfaceName return the default interface name, this will be suffixed by a number + DefaultInterfaceName() string + // Network returns the network with the given name or ID. 
+ // It returns an error if the network is not found + Network(nameOrID string) (*types.Network, error) +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go new file mode 100644 index 00000000000..20819f75663 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interfaces.go @@ -0,0 +1,34 @@ +package util + +import "net" + +// getLiveNetworkSubnets returns a slice of subnets representing what the system +// has defined as network interfaces +func getLiveNetworkSubnets() ([]*net.IPNet, error) { + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, err + } + nets := make([]*net.IPNet, 0, len(addrs)) + for _, address := range addrs { + _, n, err := net.ParseCIDR(address.String()) + if err != nil { + return nil, err + } + nets = append(nets, n) + } + return nets, nil +} + +// GetLiveNetworkNames returns a list of network interface names on the system +func GetLiveNetworkNames() ([]string, error) { + liveInterfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + interfaceNames := make([]string, 0, len(liveInterfaces)) + for _, i := range liveInterfaces { + interfaceNames = append(interfaceNames, i.Name) + } + return interfaceNames, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/ip.go b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go new file mode 100644 index 00000000000..8f00a2a5560 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/ip.go @@ -0,0 +1,70 @@ +package util + +import ( + "crypto/rand" + "net" + + "github.com/pkg/errors" +) + +func incByte(subnet *net.IPNet, idx int, shift uint) error { + if idx < 0 { + return errors.New("no more subnets left") + } + if subnet.IP[idx] == 255 { + subnet.IP[idx] = 0 + return incByte(subnet, idx-1, 0) + } + subnet.IP[idx] += 1 << shift + return nil +} + +// NextSubnet returns subnet incremented by 1 +func NextSubnet(subnet *net.IPNet) (*net.IPNet, error) { + newSubnet := &net.IPNet{ + IP: subnet.IP, + Mask: subnet.Mask, + } + ones, bits := newSubnet.Mask.Size() + if ones == 0 { + return nil, errors.Errorf("%s has only one subnet", subnet.String()) + } + zeroes := uint(bits - ones) + shift := zeroes % 8 + idx := ones/8 - 1 + if idx < 0 { + idx = 0 + } + if err := incByte(newSubnet, idx, shift); err != nil { + return nil, err + } + return newSubnet, nil +} + +func NetworkIntersectsWithNetworks(n *net.IPNet, networklist []*net.IPNet) bool { + for _, nw := range networklist { + if networkIntersect(n, nw) { + return true + } + } + return false +} + +func networkIntersect(n1, n2 *net.IPNet) bool { + return n2.Contains(n1.IP) || n1.Contains(n2.IP) +} + +// getRandomIPv6Subnet returns a random internal ipv6 subnet as described in RFC3879. +func getRandomIPv6Subnet() (net.IPNet, error) { + ip := make(net.IP, 8, net.IPv6len) + // read 8 random bytes + _, err := rand.Read(ip) + if err != nil { + return net.IPNet{}, err + } + // first byte must be FD as per RFC3879 + ip[0] = 0xfd + // add 8 zero bytes + ip = append(ip, make([]byte, 8)...) 
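	// The value returned below is therefore a unique-local /64 of the form
	// fdxx:xxxx:xxxx:xxxx::/64; unique-local IPv6 addressing is specified in RFC 4193.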
+ return net.IPNet{IP: ip, Mask: net.CIDRMask(64, 128)}, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/parse.go b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go new file mode 100644 index 00000000000..1f68df0bb0d --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/parse.go @@ -0,0 +1,37 @@ +package util + +import ( + "strconv" + + "github.com/pkg/errors" +) + +// ParseMTU parses the mtu option +func ParseMTU(mtu string) (int, error) { + if mtu == "" { + return 0, nil // default + } + m, err := strconv.Atoi(mtu) + if err != nil { + return 0, err + } + if m < 0 { + return 0, errors.Errorf("mtu %d is less than zero", m) + } + return m, nil +} + +// ParseVlan parses the vlan option +func ParseVlan(vlan string) (int, error) { + if vlan == "" { + return 0, nil // default + } + v, err := strconv.Atoi(vlan) + if err != nil { + return 0, err + } + if v < 0 || v > 4094 { + return 0, errors.Errorf("vlan ID %d must be between 0 and 4094", v) + } + return v, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go new file mode 100644 index 00000000000..8138d9fbc42 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -0,0 +1,123 @@ +package util + +import ( + "errors" + "fmt" + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/util" + "github.com/sirupsen/logrus" +) + +// GetBridgeInterfaceNames returns all bridge interface names +// already used by network configs +func GetBridgeInterfaceNames(n NetUtil) []string { + names := make([]string, 0, n.Len()) + n.ForEach(func(net types.Network) { + if net.Driver == types.BridgeNetworkDriver { + names = append(names, net.NetworkInterface) + } + }) + return names +} + +// GetUsedNetworkNames returns all network names already used +// by network configs +func GetUsedNetworkNames(n NetUtil) []string { + names := make([]string, 0, n.Len()) + n.ForEach(func(net types.Network) { + if net.Driver == types.BridgeNetworkDriver { + names = append(names, net.NetworkInterface) + } + }) + return names +} + +// GetFreeDeviceName returns a free device name which can +// be used for new configs as name and bridge interface name. +// The base name is suffixed by a number +func GetFreeDeviceName(n NetUtil) (string, error) { + bridgeNames := GetBridgeInterfaceNames(n) + netNames := GetUsedNetworkNames(n) + liveInterfaces, err := GetLiveNetworkNames() + if err != nil { + return "", nil + } + names := make([]string, 0, len(bridgeNames)+len(netNames)+len(liveInterfaces)) + names = append(names, bridgeNames...) + names = append(names, netNames...) + names = append(names, liveInterfaces...) + // FIXME: Is a limit fine? + // Start by 1, 0 is reserved for the default network + for i := 1; i < 1000000; i++ { + deviceName := fmt.Sprintf("%s%d", n.DefaultInterfaceName(), i) + if !util.StringInSlice(deviceName, names) { + logrus.Debugf("found free device name %s", deviceName) + return deviceName, nil + } + } + return "", errors.New("could not find free device name, to many iterations") +} + +// GetUsedSubnets returns a list of all used subnets by network +// configs and interfaces on the host. 
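// The result is what CreateBridge and ValidateSubnet receive as usedNetworks,
// so a subnet that is already present on the host or in another config is
// neither handed out automatically nor accepted from the user.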
+func GetUsedSubnets(n NetUtil) ([]*net.IPNet, error) { + // first, load all used subnets from network configs + subnets := make([]*net.IPNet, 0, n.Len()) + n.ForEach(func(n types.Network) { + for i := range n.Subnets { + subnets = append(subnets, &n.Subnets[i].Subnet.IPNet) + } + }) + // second, load networks from the current system + liveSubnets, err := getLiveNetworkSubnets() + if err != nil { + return nil, err + } + return append(subnets, liveSubnets...), nil +} + +// GetFreeIPv4NetworkSubnet returns a unused ipv4 subnet +func GetFreeIPv4NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) { + // the default podman network is 10.88.0.0/16 + // start locking for free /24 networks + network := &net.IPNet{ + IP: net.IP{10, 89, 0, 0}, + Mask: net.IPMask{255, 255, 255, 0}, + } + + // TODO: make sure to not use public subnets + for { + if intersectsConfig := NetworkIntersectsWithNetworks(network, usedNetworks); !intersectsConfig { + logrus.Debugf("found free ipv4 network subnet %s", network.String()) + return &types.Subnet{ + Subnet: types.IPNet{IPNet: *network}, + }, nil + } + var err error + network, err = NextSubnet(network) + if err != nil { + return nil, err + } + } +} + +// GetFreeIPv6NetworkSubnet returns a unused ipv6 subnet +func GetFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error) { + // FIXME: Is 10000 fine as limit? We should prevent an endless loop. + for i := 0; i < 10000; i++ { + // RFC4193: Choose the ipv6 subnet random and NOT sequentially. + network, err := getRandomIPv6Subnet() + if err != nil { + return nil, err + } + if intersectsConfig := NetworkIntersectsWithNetworks(&network, usedNetworks); !intersectsConfig { + logrus.Debugf("found free ipv6 network subnet %s", network.String()) + return &types.Subnet{ + Subnet: types.IPNet{IPNet: network}, + }, nil + } + } + return nil, errors.New("failed to get random ipv6 subnet") +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go new file mode 100644 index 00000000000..ac3934f8df3 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go @@ -0,0 +1,124 @@ +package util + +import ( + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + "github.com/pkg/errors" +) + +// ValidateSubnet will validate a given Subnet. It checks if the +// given gateway and lease range are part of this subnet. If the +// gateway is empty and addGateway is true it will get the first +// available ip in the subnet assigned. +func ValidateSubnet(s *types.Subnet, addGateway bool, usedNetworks []*net.IPNet) error { + if s == nil { + return errors.New("subnet is nil") + } + if s.Subnet.IP == nil { + return errors.New("subnet ip is nil") + } + + // Reparse to ensure subnet is valid. + // Do not use types.ParseCIDR() because we want the ip to be + // the network address and not a random ip in the subnet. 
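	// For example, net.ParseCIDR("10.89.1.5/24") returns 10.89.1.5 as the first
	// value and 10.89.1.0/24 as the *net.IPNet; only the latter, masked form is
	// stored back into the subnet below.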
+ _, n, err := net.ParseCIDR(s.Subnet.String()) + if err != nil { + return errors.Wrap(err, "subnet invalid") + } + + // check that the new subnet does not conflict with existing ones + if NetworkIntersectsWithNetworks(n, usedNetworks) { + return errors.Errorf("subnet %s is already used on the host or by another config", n.String()) + } + + s.Subnet = types.IPNet{IPNet: *n} + if s.Gateway != nil { + if !s.Subnet.Contains(s.Gateway) { + return errors.Errorf("gateway %s not in subnet %s", s.Gateway, &s.Subnet) + } + util.NormalizeIP(&s.Gateway) + } else if addGateway { + ip, err := util.FirstIPInSubnet(n) + if err != nil { + return err + } + s.Gateway = ip + } + + if s.LeaseRange != nil { + if s.LeaseRange.StartIP != nil { + if !s.Subnet.Contains(s.LeaseRange.StartIP) { + return errors.Errorf("lease range start ip %s not in subnet %s", s.LeaseRange.StartIP, &s.Subnet) + } + util.NormalizeIP(&s.LeaseRange.StartIP) + } + if s.LeaseRange.EndIP != nil { + if !s.Subnet.Contains(s.LeaseRange.EndIP) { + return errors.Errorf("lease range end ip %s not in subnet %s", s.LeaseRange.EndIP, &s.Subnet) + } + util.NormalizeIP(&s.LeaseRange.EndIP) + } + } + return nil +} + +// ValidateSubnets will validate the subnets for this network. +// It also sets the gateway if the gateway is empty and addGateway is set to true +// IPv6Enabled to true if at least one subnet is ipv6. +func ValidateSubnets(network *types.Network, addGateway bool, usedNetworks []*net.IPNet) error { + for i := range network.Subnets { + err := ValidateSubnet(&network.Subnets[i], addGateway, usedNetworks) + if err != nil { + return err + } + if util.IsIPv6(network.Subnets[i].Subnet.IP) { + network.IPv6Enabled = true + } + } + return nil +} + +func ValidateSetupOptions(n NetUtil, namespacePath string, options types.SetupOptions) error { + if namespacePath == "" { + return errors.New("namespacePath is empty") + } + if options.ContainerID == "" { + return errors.New("ContainerID is empty") + } + if len(options.Networks) == 0 { + return errors.New("must specify at least one network") + } + for name, netOpts := range options.Networks { + netOpts := netOpts + network, err := n.Network(name) + if err != nil { + return err + } + err = validatePerNetworkOpts(network, &netOpts) + if err != nil { + return err + } + } + return nil +} + +// validatePerNetworkOpts checks that all given static ips are in a subnet on this network +func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOptions) error { + if netOpts.InterfaceName == "" { + return errors.Errorf("interface name on network %s is empty", network.Name) + } + if network.IPAMOptions["driver"] == types.HostLocalIPAMDriver { + outer: + for _, ip := range netOpts.StaticIPs { + for _, s := range network.Subnets { + if s.Subnet.Contains(ip) { + continue outer + } + } + return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.Name) + } + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go new file mode 100644 index 00000000000..16b4e5c53b7 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -0,0 +1,249 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "net" + "os" + "path/filepath" + "time" + + internalutil "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/util" + 
"github.com/containers/storage/pkg/stringid" + "github.com/pkg/errors" +) + +// NetworkCreate will take a partial filled Network and fill the +// missing fields. It creates the Network and returns the full Network. +// nolint:gocritic +func (n *netavarkNetwork) NetworkCreate(net types.Network) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + network, err := n.networkCreate(&net, false) + if err != nil { + return types.Network{}, err + } + // add the new network to the map + n.networks[network.Name] = network + return *network, nil +} + +func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (*types.Network, error) { + // if no driver is set use the default one + if newNetwork.Driver == "" { + newNetwork.Driver = types.DefaultNetworkDriver + } + if !defaultNet { + // FIXME: Should we use a different type for network create without the ID field? + // the caller is not allowed to set a specific ID + if newNetwork.ID != "" { + return nil, errors.Wrap(types.ErrInvalidArg, "ID can not be set for network create") + } + + // generate random network ID + var i int + for i = 0; i < 1000; i++ { + id := stringid.GenerateNonCryptoID() + if _, err := n.getNetwork(id); err != nil { + newNetwork.ID = id + break + } + } + if i == 1000 { + return nil, errors.New("failed to create random network ID") + } + } + + err := internalutil.CommonNetworkCreate(n, newNetwork) + if err != nil { + return nil, err + } + + // Only get the used networks for validation if we do not create the default network. + // The default network should not be validated against used subnets, we have to ensure + // that this network can always be created even when a subnet is already used on the host. + // This could happen if you run a container on this net, then the cni interface will be + // created on the host and "block" this subnet from being used again. + // Therefore the next podman command tries to create the default net again and it would + // fail because it thinks the network is used on the host. 
+ var usedNetworks []*net.IPNet + if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver { + usedNetworks, err = internalutil.GetUsedSubnets(n) + if err != nil { + return nil, err + } + } + + switch newNetwork.Driver { + case types.BridgeNetworkDriver: + err = internalutil.CreateBridge(n, newNetwork, usedNetworks) + if err != nil { + return nil, err + } + // validate the given options, we do not need them but just check to make sure they are valid + for key, value := range newNetwork.Options { + switch key { + case "mtu": + _, err = internalutil.ParseMTU(value) + if err != nil { + return nil, err + } + + case "vlan": + _, err = internalutil.ParseVlan(value) + if err != nil { + return nil, err + } + + default: + return nil, errors.Errorf("unsupported bridge network option %s", key) + } + } + case types.MacVLANNetworkDriver: + err = createMacvlan(newNetwork) + if err != nil { + return nil, err + } + default: + return nil, errors.Wrapf(types.ErrInvalidArg, "unsupported driver %s", newNetwork.Driver) + } + + // add gatway when not internal or dns enabled + addGateway := !newNetwork.Internal || newNetwork.DNSEnabled + err = internalutil.ValidateSubnets(newNetwork, addGateway, usedNetworks) + if err != nil { + return nil, err + } + + newNetwork.Created = time.Now() + + if !defaultNet { + confPath := filepath.Join(n.networkConfigDir, newNetwork.Name+".json") + f, err := os.Create(confPath) + if err != nil { + return nil, err + } + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + err = enc.Encode(newNetwork) + if err != nil { + return nil, err + } + } + + return newNetwork, nil +} + +func createMacvlan(network *types.Network) error { + if network.Internal { + return errors.New("internal is not supported with macvlan") + } + if network.NetworkInterface != "" { + interfaceNames, err := internalutil.GetLiveNetworkNames() + if err != nil { + return err + } + if !util.StringInSlice(network.NetworkInterface, interfaceNames) { + return errors.Errorf("parent interface %s does not exist", network.NetworkInterface) + } + } + if len(network.Subnets) == 0 { + return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not supported with netavark") + } + network.IPAMOptions["driver"] = types.HostLocalIPAMDriver + + // validate the given options, we do not need them but just check to make sure they are valid + for key, value := range network.Options { + switch key { + case "mode": + if !util.StringInSlice(value, types.ValidMacVLANModes) { + return errors.Errorf("unknown macvlan mode %q", value) + } + case "mtu": + _, err := internalutil.ParseMTU(value) + if err != nil { + return err + } + default: + return errors.Errorf("unsupported macvlan network option %s", key) + } + } + return nil +} + +// NetworkRemove will remove the Network with the given name or ID. +// It does not ensure that the network is unused. +func (n *netavarkNetwork) NetworkRemove(nameOrID string) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return err + } + + // Removing the default network is not allowed. 
+ if network.Name == n.defaultNetwork { + return errors.Errorf("default network %s cannot be removed", n.defaultNetwork) + } + + file := filepath.Join(n.networkConfigDir, network.Name+".json") + // make sure to not error for ErrNotExist + if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + delete(n.networks, network.Name) + return nil +} + +// NetworkList will return all known Networks. Optionally you can +// supply a list of filter functions. Only if a network matches all +// functions it is returned. +func (n *netavarkNetwork) NetworkList(filters ...types.FilterFunc) ([]types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + networks := make([]types.Network, 0, len(n.networks)) +outer: + for _, net := range n.networks { + for _, filter := range filters { + // All filters have to match, if one does not match we can skip to the next network. + if !filter(*net) { + continue outer + } + } + networks = append(networks, *net) + } + return networks, nil +} + +// NetworkInspect will return the Network with the given name or ID. +func (n *netavarkNetwork) NetworkInspect(nameOrID string) (types.Network, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return types.Network{}, err + } + + network, err := n.getNetwork(nameOrID) + if err != nil { + return types.Network{}, err + } + return *network, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go new file mode 100644 index 00000000000..9709315c6ca --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go @@ -0,0 +1,5 @@ +// +build linux + +package netavark + +const defaultBridgeName = "podman" diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go new file mode 100644 index 00000000000..1812b908435 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go @@ -0,0 +1,161 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "errors" + "io" + "os" + "os/exec" + "strconv" + + "github.com/sirupsen/logrus" +) + +type netavarkError struct { + exitCode int + // Set the json key to "error" so we can directly unmarshal into this struct + Msg string `json:"error"` + err error +} + +func (e *netavarkError) Error() string { + ec := "" + // only add the exit code the the error message if we have at least info log level + // the normal user does not need to care about the number + if e.exitCode > 0 && logrus.IsLevelEnabled(logrus.InfoLevel) { + ec = " (exit code " + strconv.Itoa(e.exitCode) + ")" + } + msg := "netavark" + ec + if len(msg) > 0 { + msg += ": " + e.Msg + } + if e.err != nil { + msg += ": " + e.err.Error() + } + return msg +} + +func (e *netavarkError) Unwrap() error { + return e.err +} + +func newNetavarkError(msg string, err error) error { + return &netavarkError{ + Msg: msg, + err: err, + } +} + +// Type to implement io.Writer interface +// This will write the logrus at info level +type logrusNetavarkWriter struct{} + +func (l *logrusNetavarkWriter) Write(b []byte) (int, error) { + logrus.Info("netavark: ", string(b)) + return len(b), nil +} + +// getRustLogEnv returns the RUST_LOG env var based on the current logrus level +func getRustLogEnv() string { + level := logrus.GetLevel().String() + // rust env_log uses warn instead of 
warning + if level == "warning" { + level = "warn" + } + // the rust netlink library is very verbose + // make sure to only log netavark logs + return "RUST_LOG=netavark=" + level +} + +// execNetavark will execute netavark with the following arguments +// It takes the path to the binary, the list of args and an interface which is +// marshaled to json and send via stdin to netavark. The result interface is +// used to marshal the netavark output into it. This can be nil. +// All errors return by this function should be of the type netavarkError +// to provide a helpful error message. +func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{}) error { + stdinR, stdinW, err := os.Pipe() + if err != nil { + return newNetavarkError("failed to create stdin pipe", err) + } + stdinWClosed := false + defer func() { + stdinR.Close() + if !stdinWClosed { + stdinW.Close() + } + }() + + stdoutR, stdoutW, err := os.Pipe() + if err != nil { + return newNetavarkError("failed to create stdout pipe", err) + } + stdoutWClosed := false + defer func() { + stdoutR.Close() + if !stdoutWClosed { + stdoutW.Close() + } + }() + + // connect stderr to the podman stderr for logging + var logWriter io.Writer = os.Stderr + if n.syslog { + // connect logrus to stderr as well so that the logs will be written to the syslog as well + logWriter = io.MultiWriter(logWriter, &logrusNetavarkWriter{}) + } + + cmd := exec.Command(n.netavarkBinary, append(n.getCommonNetavarkOptions(), args...)...) + // connect the pipes to stdin and stdout + cmd.Stdin = stdinR + cmd.Stdout = stdoutW + cmd.Stderr = logWriter + // set the netavark log level to the same as the podman + cmd.Env = append(os.Environ(), getRustLogEnv()) + // if we run with debug log level lets also set RUST_BACKTRACE=1 so we can get the full stack trace in case of panics + if logrus.IsLevelEnabled(logrus.DebugLevel) { + cmd.Env = append(cmd.Env, "RUST_BACKTRACE=1") + } + + err = cmd.Start() + if err != nil { + return newNetavarkError("failed to start process", err) + } + err = json.NewEncoder(stdinW).Encode(stdin) + // we have to close stdinW so netavark gets the EOF and does not hang forever + stdinW.Close() + stdinWClosed = true + if err != nil { + return newNetavarkError("failed to encode stdin data", err) + } + + dec := json.NewDecoder(stdoutR) + + err = cmd.Wait() + // we have to close stdoutW so we can decode the json without hanging forever + stdoutW.Close() + stdoutWClosed = true + if err != nil { + exitError := &exec.ExitError{} + if errors.As(err, &exitError) { + ne := &netavarkError{} + // lets disallow unknown fields to make sure we do not get some unexpected stuff + dec.DisallowUnknownFields() + // this will unmarshal the error message into the error struct + ne.err = dec.Decode(ne) + ne.exitCode = exitError.ExitCode() + return ne + } + return newNetavarkError("unexpected failure during execution", err) + } + + if result != nil { + err = dec.Decode(result) + if err != nil { + return newNetavarkError("failed to decode result", err) + } + } + return nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go new file mode 100644 index 00000000000..f99d099cad3 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -0,0 +1,372 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/libnetwork/util" + 
"github.com/pkg/errors" + "go.etcd.io/bbolt" +) + +// IPAM boltdb structure +// Each network has their own bucket with the network name as bucket key. +// Inside the network bucket there is an ID bucket which maps the container ID (key) +// to a json array of ip addresses (value). +// The network bucket also has a bucket for each subnet, the subnet is used as key. +// Inside the subnet bucket an ip is used as key and the container ID as value. + +const ( + idBucket = "ids" + // lastIP this is used as key to store the last allocated ip + // note that this string should not be 4 or 16 byte long + lastIP = "lastIP" +) + +var ( + idBucketKey = []byte(idBucket) + lastIPKey = []byte(lastIP) +) + +type ipamError struct { + msg string + cause error +} + +func (e *ipamError) Error() string { + msg := "IPAM error" + if e.msg != "" { + msg += ": " + e.msg + } + if e.cause != nil { + msg += ": " + e.cause.Error() + } + return msg +} + +func newIPAMError(cause error, msg string, args ...interface{}) *ipamError { + return &ipamError{ + msg: fmt.Sprintf(msg, args...), + cause: cause, + } +} + +// openDB will open the ipam database +// Note that the caller has to Close it. +func (n *netavarkNetwork) openDB() (*bbolt.DB, error) { + // linter complains about the octal value + // nolint:gocritic + db, err := bbolt.Open(n.ipamDBPath, 0600, nil) + if err != nil { + return nil, newIPAMError(err, "failed to open database %s", n.ipamDBPath) + } + return db, nil +} + +// allocIPs will allocate ips for the the container. It will change the +// NetworkOptions in place. When static ips are given it will validate +// that these are free to use and will allocate them to the container. +func (n *netavarkNetwork) allocIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.Update(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + + // create/get network bucket + netBkt, err := tx.CreateBucketIfNotExists([]byte(netName)) + if err != nil { + return newIPAMError(err, "failed to create/get network bucket for network %s", netName) + } + + // requestIPs is the list of ips which should be used for this container + requestIPs := make([]net.IP, 0, len(network.Subnets)) + + for i := range network.Subnets { + subnetBkt, err := netBkt.CreateBucketIfNotExists([]byte(network.Subnets[i].Subnet.String())) + if err != nil { + return newIPAMError(err, "failed to create/get subnet bucket for network %s", netName) + } + + // search for a static ip which matches the current subnet + // in this case the user wants this one and we should not assign a free one + var ip net.IP + for _, staticIP := range netOpts.StaticIPs { + if network.Subnets[i].Subnet.Contains(staticIP) { + ip = staticIP + break + } + } + + // when static ip is requested for this network + if ip != nil { + // convert to 4 byte ipv4 if needed + util.NormalizeIP(&ip) + id := subnetBkt.Get(ip) + if id != nil { + return newIPAMError(nil, "requested ip address %s is already allocated to container ID %s", ip.String(), string(id)) + } + } else { + ip, err = getFreeIPFromBucket(subnetBkt, &network.Subnets[i]) + if err != nil { + return err + } + err = subnetBkt.Put(lastIPKey, ip) + if err != nil { + return newIPAMError(err, "failed to store last ip in database") + } + } + + err 
= subnetBkt.Put(ip, []byte(opts.ContainerID)) + if err != nil { + return newIPAMError(err, "failed to store ip in database") + } + + requestIPs = append(requestIPs, ip) + } + + idsBucket, err := netBkt.CreateBucketIfNotExists(idBucketKey) + if err != nil { + return newIPAMError(err, "failed to create/get id bucket for network %s", netName) + } + + ipsBytes, err := json.Marshal(requestIPs) + if err != nil { + return newIPAMError(err, "failed to marshal ips") + } + + err = idsBucket.Put([]byte(opts.ContainerID), ipsBytes) + if err != nil { + return newIPAMError(err, "failed to store ips in database") + } + + netOpts.StaticIPs = requestIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +func getFreeIPFromBucket(bucket *bbolt.Bucket, subnet *types.Subnet) (net.IP, error) { + var rangeStart net.IP + var rangeEnd net.IP + if subnet.LeaseRange != nil { + rangeStart = subnet.LeaseRange.StartIP + rangeEnd = subnet.LeaseRange.EndIP + } + + if rangeStart == nil { + // let start with the first ip in subnet + rangeStart = util.NextIP(subnet.Subnet.IP) + } + + if rangeEnd == nil { + lastIP, err := util.LastIPInSubnet(&subnet.Subnet.IPNet) + // this error should never happen but lets check anyways to prevent panics + if err != nil { + return nil, errors.Wrap(err, "failed to get lastIP") + } + // ipv4 uses the last ip in a subnet for broadcast so we cannot use it + if util.IsIPv4(lastIP) { + lastIP = util.PrevIP(lastIP) + } + rangeEnd = lastIP + } + + lastIPByte := bucket.Get(lastIPKey) + curIP := net.IP(lastIPByte) + if curIP == nil { + curIP = rangeStart + } else { + curIP = util.NextIP(curIP) + } + + // store the start ip to make sure we know when we looped over all available ips + startIP := curIP + + for { + // skip the gateway + if subnet.Gateway != nil { + if util.Cmp(curIP, subnet.Gateway) == 0 { + curIP = util.NextIP(curIP) + continue + } + } + + // if we are at the end we need to jump back to the start ip + if util.Cmp(curIP, rangeEnd) > 0 { + if util.Cmp(rangeStart, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + curIP = rangeStart + continue + } + + // check if ip is already used by another container + // if not return it + if bucket.Get(curIP) == nil { + return curIP, nil + } + + curIP = util.NextIP(curIP) + + if util.Cmp(curIP, startIP) == 0 { + return nil, newIPAMError(nil, "failed to find free IP in range: %s - %s", rangeStart.String(), rangeEnd.String()) + } + } +} + +// getAssignedIPs will read the ipam database and will fill in the used ips for this container. +// It will change the NetworkOptions in place. 
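// Callers that tear a container down are expected to use it together with
// deallocIPs, roughly as follows (sketch; opts stands for the container's
// teardown options, which embed NetworkOptions like the setup options do):
//
//	if err := n.getAssignedIPs(&opts.NetworkOptions); err != nil {
//		return err
//	}
//	if err := n.deallocIPs(&opts.NetworkOptions); err != nil {
//		return err
//	}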
+func (n *netavarkNetwork) getAssignedIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.View(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + ipJSON := idBkt.Get([]byte(opts.ContainerID)) + if ipJSON == nil { + return newIPAMError(nil, "failed to get ips for container ID %s on network %s", opts.ContainerID, netName) + } + + // assignedIPs is the list of ips which should be used for this container + assignedIPs := make([]net.IP, 0, len(network.Subnets)) + + err = json.Unmarshal(ipJSON, &assignedIPs) + if err != nil { + return newIPAMError(err, "failed to unmarshal ips from database") + } + + for i := range assignedIPs { + util.NormalizeIP(&assignedIPs[i]) + } + + netOpts.StaticIPs = assignedIPs + opts.Networks[netName] = netOpts + } + return nil + }) + return err +} + +// deallocIPs will release the ips in the network options from the DB so that +// they can be reused by other containers. It expects that the network options +// are already filled with the correct ips. Use getAssignedIPs() for this. +func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error { + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + err = db.Update(func(tx *bbolt.Tx) error { + for netName, netOpts := range opts.Networks { + network := n.networks[netName] + if network == nil { + return newIPAMError(nil, "could not find network %q", netName) + } + + // check if we have to alloc ips + if !requiresIPAMAlloc(network) { + continue + } + // get network bucket + netBkt := tx.Bucket([]byte(netName)) + if netBkt == nil { + return newIPAMError(nil, "failed to get network bucket for network %s", netName) + } + + for _, subnet := range network.Subnets { + subnetBkt := netBkt.Bucket([]byte(subnet.Subnet.String())) + if subnetBkt == nil { + return newIPAMError(nil, "failed to get subnet bucket for network %s", netName) + } + + // search for a static ip which matches the current subnet + // in this case the user wants this one and we should not assign a free one + var ip net.IP + for _, staticIP := range netOpts.StaticIPs { + if subnet.Subnet.Contains(staticIP) { + ip = staticIP + break + } + } + if ip == nil { + return newIPAMError(nil, "failed to find ip for subnet %s on network %s", subnet.Subnet.String(), netName) + } + util.NormalizeIP(&ip) + + err = subnetBkt.Delete(ip) + if err != nil { + return newIPAMError(err, "failed to remove ip %s from subnet bucket for network %s", ip.String(), netName) + } + } + + idBkt := netBkt.Bucket(idBucketKey) + if idBkt == nil { + return newIPAMError(nil, "failed to get id bucket for network %s", netName) + } + + err = idBkt.Delete([]byte(opts.ContainerID)) + if err != nil { + return newIPAMError(err, "failed to remove allocated ips for container ID %s on network %s", opts.ContainerID, netName) + } + } + return nil + }) + return err +} + +// requiresIPAMAlloc return true when we have to allocate ips for this network +// it 
checks the ipam driver and if subnets are set +func requiresIPAMAlloc(network *types.Network) bool { + // only do host allocation when driver is set to HostLocalIPAMDriver or unset + switch network.IPAMOptions["driver"] { + case "", types.HostLocalIPAMDriver: + default: + return false + } + + // no subnets == no ips to assign + return len(network.Subnets) > 0 +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go new file mode 100644 index 00000000000..efea36fec2e --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -0,0 +1,309 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type netavarkNetwork struct { + // networkConfigDir is directory where the network config files are stored. + networkConfigDir string + + // networkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config etc + networkRunDir string + + // tells netavark whether this is rootless mode or rootfull, "true" or "false" + networkRootless bool + + // netavarkBinary is the path to the netavark binary. + netavarkBinary string + // aardvarkBinary is the path to the aardvark binary. + aardvarkBinary string + + // defaultNetwork is the name for the default network. + defaultNetwork string + // defaultSubnet is the default subnet for the default network. + defaultSubnet types.IPNet + + // ipamDBPath is the path to the ip allocation bolt db + ipamDBPath string + + // syslog describes whenever the netavark debbug output should be log to the syslog as well. + // This will use logrus to do so, make sure logrus is set up to log to the syslog. + syslog bool + + // lock is a internal lock for critical operations + lock lockfile.Locker + + // modTime is the timestamp when the config dir was modified + modTime time.Time + + // networks is a map with loaded networks, the key is the network name + networks map[string]*types.Network +} + +type InitConfig struct { + // NetworkConfigDir is directory where the network config files are stored. + NetworkConfigDir string + + // NetavarkBinary is the path to the netavark binary. + NetavarkBinary string + // AardvarkBinary is the path to the aardvark binary. + AardvarkBinary string + + // NetworkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config + NetworkRunDir string + + // DefaultNetwork is the name for the default network. + DefaultNetwork string + // DefaultSubnet is the default subnet for the default network. + DefaultSubnet string + + // Syslog describes whenever the netavark debbug output should be log to the syslog as well. + // This will use logrus to do so, make sure logrus is set up to log to the syslog. + Syslog bool +} + +// NewNetworkInterface creates the ContainerNetwork interface for the netavark backend. +// Note: The networks are not loaded from disk until a method is called. 
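// A minimal construction sketch (the paths are illustrative, not defaults
// defined by this package):
//
//	netIface, err := netavark.NewNetworkInterface(&netavark.InitConfig{
//		NetworkConfigDir: "/etc/containers/networks",
//		NetworkRunDir:    "/run/containers/networks",
//		NetavarkBinary:   "/usr/libexec/podman/netavark",
//	})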
+func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { + // TODO: consider using a shared memory lock + lock, err := lockfile.GetLockfile(filepath.Join(conf.NetworkConfigDir, "netavark.lock")) + if err != nil { + return nil, err + } + + defaultNetworkName := conf.DefaultNetwork + if defaultNetworkName == "" { + defaultNetworkName = types.DefaultNetworkName + } + + defaultSubnet := conf.DefaultSubnet + if defaultSubnet == "" { + defaultSubnet = types.DefaultSubnet + } + defaultNet, err := types.ParseCIDR(defaultSubnet) + if err != nil { + return nil, errors.Wrap(err, "failed to parse default subnet") + } + + if err := os.MkdirAll(conf.NetworkConfigDir, 0755); err != nil { + return nil, err + } + + if err := os.MkdirAll(conf.NetworkRunDir, 0755); err != nil { + return nil, err + } + + n := &netavarkNetwork{ + networkConfigDir: conf.NetworkConfigDir, + networkRunDir: conf.NetworkRunDir, + netavarkBinary: conf.NetavarkBinary, + aardvarkBinary: conf.AardvarkBinary, + networkRootless: unshare.IsRootless(), + ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), + defaultNetwork: defaultNetworkName, + defaultSubnet: defaultNet, + lock: lock, + syslog: conf.Syslog, + } + + return n, nil +} + +// Drivers will return the list of supported network drivers +// for this interface. +func (n *netavarkNetwork) Drivers() []string { + return []string{types.BridgeNetworkDriver, types.MacVLANNetworkDriver} +} + +// DefaultNetworkName will return the default netavark network name. +func (n *netavarkNetwork) DefaultNetworkName() string { + return n.defaultNetwork +} + +func (n *netavarkNetwork) loadNetworks() error { + // check the mod time of the config dir + f, err := os.Stat(n.networkConfigDir) + if err != nil { + return err + } + modTime := f.ModTime() + + // skip loading networks if they are already loaded and + // if the config dir was not modified since the last call + if n.networks != nil && modTime.Equal(n.modTime) { + return nil + } + // make sure the remove all networks before we reload them + n.networks = nil + n.modTime = modTime + + files, err := ioutil.ReadDir(n.networkConfigDir) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + + networks := make(map[string]*types.Network, len(files)) + for _, f := range files { + if f.IsDir() { + continue + } + if filepath.Ext(f.Name()) != ".json" { + continue + } + + path := filepath.Join(n.networkConfigDir, f.Name()) + file, err := os.Open(path) + if err != nil { + // do not log ENOENT errors + if !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Error loading network config file %q: %v", path, err) + } + continue + } + network := new(types.Network) + err = json.NewDecoder(file).Decode(network) + if err != nil { + logrus.Warnf("Error reading network config file %q: %v", path, err) + continue + } + + // check that the filename matches the network name + if network.Name+".json" != f.Name() { + logrus.Warnf("Network config name %q does not match file name %q, skipping", network.Name, f.Name()) + continue + } + + if !types.NameRegex.MatchString(network.Name) { + logrus.Warnf("Network config %q has invalid name: %q, skipping: %v", path, network.Name, types.RegexError) + continue + } + + err = parseNetwork(network) + if err != nil { + logrus.Warnf("Network config %q could not be parsed, skipping: %v", path, err) + continue + } + + logrus.Debugf("Successfully loaded network %s: %v", network.Name, network) + networks[network.Name] = network + } + + // create the default network in memory if it did not exists on disk + 
if networks[n.defaultNetwork] == nil { + networkInfo, err := n.createDefaultNetwork() + if err != nil { + return errors.Wrapf(err, "failed to create default network %s", n.defaultNetwork) + } + networks[n.defaultNetwork] = networkInfo + } + logrus.Debugf("Successfully loaded %d networks", len(networks)) + n.networks = networks + return nil +} + +func parseNetwork(network *types.Network) error { + if network.Labels == nil { + network.Labels = map[string]string{} + } + if network.Options == nil { + network.Options = map[string]string{} + } + if network.IPAMOptions == nil { + network.IPAMOptions = map[string]string{} + } + + if len(network.ID) != 64 { + return errors.Errorf("invalid network ID %q", network.ID) + } + + // add gatway when not internal or dns enabled + addGateway := !network.Internal || network.DNSEnabled + return util.ValidateSubnets(network, addGateway, nil) +} + +func (n *netavarkNetwork) createDefaultNetwork() (*types.Network, error) { + net := types.Network{ + Name: n.defaultNetwork, + NetworkInterface: defaultBridgeName + "0", + // Important do not change this ID + ID: "2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9", + Driver: types.BridgeNetworkDriver, + Subnets: []types.Subnet{ + {Subnet: n.defaultSubnet}, + }, + } + return n.networkCreate(&net, true) +} + +// getNetwork will lookup a network by name or ID. It returns an +// error when no network was found or when more than one network +// with the given (partial) ID exists. +// getNetwork will read from the networks map, therefore the caller +// must ensure that n.lock is locked before using it. +func (n *netavarkNetwork) getNetwork(nameOrID string) (*types.Network, error) { + // fast path check the map key, this will only work for names + if val, ok := n.networks[nameOrID]; ok { + return val, nil + } + // If there was no match we might got a full or partial ID. + var net *types.Network + for _, val := range n.networks { + // This should not happen because we already looked up the map by name but check anyway. + if val.Name == nameOrID { + return val, nil + } + + if strings.HasPrefix(val.ID, nameOrID) { + if net != nil { + return nil, errors.Errorf("more than one result for network ID %s", nameOrID) + } + net = val + } + } + if net != nil { + return net, nil + } + return nil, errors.Wrapf(types.ErrNoSuchNetwork, "unable to find network with name or ID %s", nameOrID) +} + +// Implement the NetUtil interface for easy code sharing with other network interfaces. + +// ForEach call the given function for each network +func (n *netavarkNetwork) ForEach(run func(types.Network)) { + for _, val := range n.networks { + run(*val) + } +} + +// Len return the number of networks +func (n *netavarkNetwork) Len() int { + return len(n.networks) +} + +// DefaultInterfaceName return the default cni bridge name, must be suffixed with a number. 
+func (n *netavarkNetwork) DefaultInterfaceName() string { + return defaultBridgeName +} + +func (n *netavarkNetwork) Network(nameOrID string) (*types.Network, error) { + network, err := n.getNetwork(nameOrID) + if err != nil { + return nil, err + } + return network, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go new file mode 100644 index 00000000000..0a9dc370422 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -0,0 +1,132 @@ +// +build linux + +package netavark + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/containers/common/libnetwork/internal/util" + "github.com/containers/common/libnetwork/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type netavarkOptions struct { + types.NetworkOptions + Networks map[string]*types.Network `json:"network_info"` +} + +// Setup will setup the container network namespace. It returns +// a map of StatusBlocks, the key is the network name. +func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions) (map[string]types.StatusBlock, error) { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return nil, err + } + + err = util.ValidateSetupOptions(n, namespacePath, options) + if err != nil { + return nil, err + } + + // allocate IPs in the IPAM db + err = n.allocIPs(&options.NetworkOptions) + if err != nil { + return nil, err + } + + netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) + if err != nil { + return nil, errors.Wrap(err, "failed to convert net opts") + } + + // trace output to get the json + if logrus.IsLevelEnabled(logrus.TraceLevel) { + b, err := json.Marshal(&netavarkOpts) + if err != nil { + return nil, err + } + // show the full netavark command so we can easily reproduce errors from the cli + logrus.Tracef("netavark command: printf '%s' | %s setup %s", string(b), n.netavarkBinary, namespacePath) + } + + result := map[string]types.StatusBlock{} + err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result) + if err != nil { + // lets dealloc ips to prevent leaking + if err := n.deallocIPs(&options.NetworkOptions); err != nil { + logrus.Error(err) + } + return nil, err + } + + // make sure that the result makes sense + if len(result) != len(options.Networks) { + logrus.Errorf("unexpected netavark result: %v", result) + return nil, fmt.Errorf("unexpected netavark result length, want (%d), got (%d) networks", len(options.Networks), len(result)) + } + + return result, err +} + +// Teardown will teardown the container network namespace. 
+func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownOptions) error { + n.lock.Lock() + defer n.lock.Unlock() + err := n.loadNetworks() + if err != nil { + return err + } + + // get IPs from the IPAM db + err = n.getAssignedIPs(&options.NetworkOptions) + if err != nil { + // when there is an error getting the ips we should still continue + // to call teardown for netavark to prevent leaking network interfaces + logrus.Error(err) + } + + netavarkOpts, err := n.convertNetOpts(options.NetworkOptions) + if err != nil { + return errors.Wrap(err, "failed to convert net opts") + } + + retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil) + + // when netavark returned an error we still free the used ips + // otherwise we could end up in a state where block the ips forever + err = n.deallocIPs(&netavarkOpts.NetworkOptions) + if err != nil { + if retErr != nil { + logrus.Error(err) + } else { + retErr = err + } + } + + return retErr +} + +func (n *netavarkNetwork) getCommonNetavarkOptions() []string { + return []string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "--aardvark-binary=" + n.aardvarkBinary} +} + +func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOptions, error) { + netavarkOptions := netavarkOptions{ + NetworkOptions: opts, + Networks: make(map[string]*types.Network, len(opts.Networks)), + } + + for network := range opts.Networks { + net, err := n.getNetwork(network) + if err != nil { + return nil, err + } + netavarkOptions.Networks[network] = net + } + return &netavarkOptions, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go new file mode 100644 index 00000000000..cd4fd89f159 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -0,0 +1,203 @@ +// +build linux + +package network + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/cni" + "github.com/containers/common/libnetwork/netavark" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" +) + +const ( + // defaultNetworkBackendFileName is the file name for sentinel file to store the backend + defaultNetworkBackendFileName = "defaultNetworkBackend" + // cniConfigDir is the directory where cni configuration is found + cniConfigDir = "/etc/cni/net.d/" + // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins + cniConfigDirRootless = "cni/net.d/" + // netavarkConfigDir is the config directory for the rootful network files + netavarkConfigDir = "/etc/containers/networks" + // netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db + netavarkRunDir = "/run/containers/networks" + + // netavarkBinary is the name of the netavark binary + netavarkBinary = "netavark" + // aardvarkBinary is the name of the aardvark binary + aardvarkBinary = "aardvark-dns" +) + +// NetworkBackend returns the network backend name and interface +// It returns either the CNI or netavark backend depending on what is set in the config. +// If the the backend is set to "" we will automatically assign the backend on the following conditions: +// 1. 
read ${graphroot}/defaultNetworkBackend +// 2. find netavark binary (if not installed use CNI) +// 3. check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI +func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { + backend := types.NetworkBackend(conf.Network.NetworkBackend) + if backend == "" { + var err error + backend, err = defaultNetworkBackend(store, conf) + if err != nil { + return "", nil, fmt.Errorf("failed to get default network backend: %w", err) + } + } + + switch backend { + case types.Netavark: + netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false) + if err != nil { + return "", nil, err + } + + aardvarkBin, err := conf.FindHelperBinary(aardvarkBinary, false) + if err != nil { + // this is not a fatal error we can still use netavark without dns + logrus.Warnf("%s binary not found, container dns will not be enabled", aardvarkBin) + } + + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + confDir = getDefaultNetavarkConfigDir(store) + } + + // We cannot use the runroot for rootful since the network namespace is shared for all + // libpod instances they also have to share the same ipam db. + // For rootless we have our own network namespace per libpod instances, + // so this is not a problem there. + runDir := netavarkRunDir + if unshare.IsRootless() { + runDir = filepath.Join(store.RunRoot(), "networks") + } + + netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ + NetworkConfigDir: confDir, + NetworkRunDir: runDir, + NetavarkBinary: netavarkBin, + AardvarkBinary: aardvarkBin, + DefaultNetwork: conf.Network.DefaultNetwork, + DefaultSubnet: conf.Network.DefaultSubnet, + Syslog: syslog, + }) + return types.Netavark, netInt, err + case types.CNI: + netInt, err := getCniInterface(conf) + return types.CNI, netInt, err + + default: + return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend) + } +} + +func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) { + // read defaultNetworkBackend file + file := filepath.Join(store.GraphRoot(), defaultNetworkBackendFileName) + b, err := ioutil.ReadFile(file) + if err == nil { + val := string(b) + if val == string(types.Netavark) { + return types.Netavark, nil + } + if val == string(types.CNI) { + return types.CNI, nil + } + return "", fmt.Errorf("unknown network backend value %q in %q", val, file) + } + // fail for all errors except ENOENT + if !errors.Is(err, os.ErrNotExist) { + return "", fmt.Errorf("could not read network backend value: %w", err) + } + + // cache the network backend to make sure always the same one will be used + defer func() { + // only write when there is no error + if err == nil { + // nolint:gocritic + if err := ioutils.AtomicWriteFile(file, []byte(backend), 0644); err != nil { + logrus.Errorf("could not write network backend to file: %v", err) + } + } + }() + + _, err = conf.FindHelperBinary("netavark", false) + if err != nil { + // if we cannot find netavark use CNI + return types.CNI, nil + } + + // now check if there are already containers, images and CNI networks (new install?) 
+ cons, err := store.Containers() + if err != nil { + return "", err + } + if len(cons) == 0 { + imgs, err := store.Images() + if err != nil { + return "", err + } + if len(imgs) == 0 { + cniInterface, err := getCniInterface(conf) + if err == nil { + nets, err := cniInterface.NetworkList() + // there is always a default network so check <= 1 + if err == nil && len(nets) <= 1 { + // we have a fresh system so use netavark + return types.Netavark, nil + } + } + } + } + return types.CNI, nil +} + +func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + var err error + confDir, err = getDefultCNIConfigDir() + if err != nil { + return nil, err + } + } + return cni.NewCNINetworkInterface(&cni.InitConfig{ + CNIConfigDir: confDir, + CNIPluginDirs: conf.Network.CNIPluginDirs, + DefaultNetwork: conf.Network.DefaultNetwork, + DefaultSubnet: conf.Network.DefaultSubnet, + IsMachine: conf.Engine.MachineEnabled, + }) +} + +func getDefultCNIConfigDir() (string, error) { + if !unshare.IsRootless() { + return cniConfigDir, nil + } + + configHome, err := homedir.GetConfigHome() + if err != nil { + return "", err + } + return filepath.Join(configHome, cniConfigDirRootless), nil +} + +// getDefaultNetavarkConfigDir return the netavark config dir. For rootful it will +// use "/etc/containers/networks" and for rootless "$graphroot/networks". We cannot +// use the graphroot for rootful since the network namespace is shared for all +// libpod instances. +func getDefaultNetavarkConfigDir(store storage.Store) string { + if !unshare.IsRootless() { + return netavarkConfigDir + } + return filepath.Join(store.GraphRoot(), "networks") +} diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go new file mode 100644 index 00000000000..b2d4a4538ec --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/const.go @@ -0,0 +1,47 @@ +package types + +const ( + // BridgeNetworkDriver defines the bridge driver + BridgeNetworkDriver = "bridge" + // DefaultNetworkDriver is the default network type used + DefaultNetworkDriver = BridgeNetworkDriver + // MacVLANNetworkDriver defines the macvlan driver + MacVLANNetworkDriver = "macvlan" + // MacVLANNetworkDriver defines the macvlan driver + IPVLANNetworkDriver = "ipvlan" + + // IPAM drivers + // HostLocalIPAMDriver store the ip + HostLocalIPAMDriver = "host-local" + // DHCPIPAMDriver get subnet and ip from dhcp server + DHCPIPAMDriver = "dhcp" + + // DefaultSubnet is the name that will be used for the default CNI network. + DefaultNetworkName = "podman" + // DefaultSubnet is the subnet that will be used for the default CNI network. 
+ DefaultSubnet = "10.88.0.0/16" + + // valid macvlan driver mode values + MacVLANModeBridge = "bridge" + MacVLANModePrivate = "private" + MacVLANModeVepa = "vepa" + MacVLANModePassthru = "passthru" + + // valid ipvlan driver modes + IPVLANModeL2 = "l2" + IPVLANModeL3 = "l3" + IPVLANModeL3s = "l3s" +) + +type NetworkBackend string + +const ( + CNI NetworkBackend = "cni" + Netavark NetworkBackend = "netavark" +) + +// ValidMacVLANModes is the list of valid mode options for the macvlan driver +var ValidMacVLANModes = []string{MacVLANModeBridge, MacVLANModePrivate, MacVLANModeVepa, MacVLANModePassthru} + +// ValidIPVLANModes is the list of valid mode options for the ipvlan driver +var ValidIPVLANModes = []string{IPVLANModeL2, IPVLANModeL3, IPVLANModeL3s} diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go new file mode 100644 index 00000000000..d37e529dfe6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -0,0 +1,25 @@ +package types + +import ( + "regexp" + + "github.com/pkg/errors" +) + +var ( + // ErrNoSuchNetwork indicates the requested network does not exist + ErrNoSuchNetwork = errors.New("network not found") + + // ErrInvalidArg indicates that an invalid argument was passed + ErrInvalidArg = errors.New("invalid argument") + + // ErrNetworkExists indicates that a network with the given name already + // exists. + ErrNetworkExists = errors.New("network already exists") + + // NameRegex is a regular expression to validate names. + // This must NOT be changed. + NameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + // RegexError is thrown in presence of an invalid name. + RegexError = errors.Wrapf(ErrInvalidArg, "names must match [a-zA-Z0-9][a-zA-Z0-9_.-]*") +) diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go new file mode 100644 index 00000000000..de865537738 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -0,0 +1,282 @@ +package types + +import ( + "encoding/json" + "net" + "time" +) + +type ContainerNetwork interface { + // NetworkCreate will take a partial filled Network and fill the + // missing fields. It creates the Network and returns the full Network. + NetworkCreate(Network) (Network, error) + // NetworkRemove will remove the Network with the given name or ID. + NetworkRemove(nameOrID string) error + // NetworkList will return all known Networks. Optionally you can + // supply a list of filter functions. Only if a network matches all + // functions it is returned. + NetworkList(...FilterFunc) ([]Network, error) + // NetworkInspect will return the Network with the given name or ID. + NetworkInspect(nameOrID string) (Network, error) + + // Setup will setup the container network namespace. It returns + // a map of StatusBlocks, the key is the network name. + Setup(namespacePath string, options SetupOptions) (map[string]StatusBlock, error) + // Teardown will teardown the container network namespace. + Teardown(namespacePath string, options TeardownOptions) error + + // Drivers will return the list of supported network drivers + // for this interface. + Drivers() []string + + // DefaultNetworkName will return the default network name + // for this interface. + DefaultNetworkName() string +} + +// Network describes the Network attributes. +type Network struct { + // Name of the Network. 
+ Name string `json:"name"` + // ID of the Network. + ID string `json:"id"` + // Driver for this Network, e.g. bridge, macvlan... + Driver string `json:"driver"` + // NetworkInterface is the network interface name on the host. + NetworkInterface string `json:"network_interface,omitempty"` + // Created contains the timestamp when this network was created. + Created time.Time `json:"created,omitempty"` + // Subnets to use for this network. + Subnets []Subnet `json:"subnets,omitempty"` + // IPv6Enabled if set to true an ipv6 subnet should be created for this net. + IPv6Enabled bool `json:"ipv6_enabled"` + // Internal is whether the Network should not have external routes + // to public or other Networks. + Internal bool `json:"internal"` + // DNSEnabled is whether name resolution is active for container on + // this Network. + DNSEnabled bool `json:"dns_enabled"` + // Labels is a set of key-value labels that have been applied to the + // Network. + Labels map[string]string `json:"labels,omitempty"` + // Options is a set of key-value options that have been applied to + // the Network. + Options map[string]string `json:"options,omitempty"` + // IPAMOptions contains options used for the ip assignment. + IPAMOptions map[string]string `json:"ipam_options,omitempty"` +} + +// IPNet is used as custom net.IPNet type to add Marshal/Unmarshal methods. +type IPNet struct { + net.IPNet +} + +// ParseCIDR parse a string to IPNet +func ParseCIDR(cidr string) (IPNet, error) { + ip, subnet, err := net.ParseCIDR(cidr) + if err != nil { + return IPNet{}, err + } + // convert to 4 bytes if ipv4 + ipv4 := ip.To4() + if ipv4 != nil { + ip = ipv4 + } + subnet.IP = ip + return IPNet{*subnet}, err +} + +func (n *IPNet) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +func (n *IPNet) UnmarshalText(text []byte) error { + subnet, err := ParseCIDR(string(text)) + if err != nil { + return err + } + *n = subnet + return nil +} + +// HardwareAddr is the same as net.HardwareAddr except +// that it adds the json marshal/unmarshal methods. +// This allows us to read the mac from a json string +// and a byte array. +// swagger:model MacAddress +type HardwareAddr net.HardwareAddr + +func (h *HardwareAddr) String() string { + return (*net.HardwareAddr)(h).String() +} + +func (h HardwareAddr) MarshalText() ([]byte, error) { + return []byte(h.String()), nil +} + +func (h *HardwareAddr) UnmarshalJSON(text []byte) error { + if len(text) == 0 { + *h = nil + return nil + } + + // if the json string start with a quote we got a string + // unmarshal the string and parse the mac from this string + if string(text[0]) == `"` { + var macString string + err := json.Unmarshal(text, &macString) + if err == nil { + mac, err := net.ParseMAC(macString) + if err == nil { + *h = HardwareAddr(mac) + return nil + } + } + } + // not a string or got an error fallback to the normal parsing + mac := make(net.HardwareAddr, 0, 6) + // use the standard json unmarshal for backwards compat + err := json.Unmarshal(text, &mac) + if err != nil { + return err + } + *h = HardwareAddr(mac) + return nil +} + +type Subnet struct { + // Subnet for this Network in CIDR form. + // swagger:strfmt string + Subnet IPNet `json:"subnet"` + // Gateway IP for this Network. + // swagger:strfmt string + Gateway net.IP `json:"gateway,omitempty"` + // LeaseRange contains the range where IP are leased. Optional. + LeaseRange *LeaseRange `json:"lease_range,omitempty"` +} + +// LeaseRange contains the range where IP are leased. 
+type LeaseRange struct { + // StartIP first IP in the subnet which should be used to assign ips. + // swagger:strfmt string + StartIP net.IP `json:"start_ip,omitempty"` + // EndIP last IP in the subnet which should be used to assign ips. + // swagger:strfmt string + EndIP net.IP `json:"end_ip,omitempty"` +} + +// StatusBlock contains the network information about a container +// connected to one Network. +type StatusBlock struct { + // Interfaces contains the created network interface in the container. + // The map key is the interface name. + Interfaces map[string]NetInterface `json:"interfaces,omitempty"` + // DNSServerIPs nameserver addresses which should be added to + // the containers resolv.conf file. + DNSServerIPs []net.IP `json:"dns_server_ips,omitempty"` + // DNSSearchDomains search domains which should be added to + // the containers resolv.conf file. + DNSSearchDomains []string `json:"dns_search_domains,omitempty"` +} + +// NetInterface contains the settings for a given network interface. +type NetInterface struct { + // Subnets list of assigned subnets with their gateway. + Subnets []NetAddress `json:"subnets,omitempty"` + // MacAddress for this Interface. + MacAddress HardwareAddr `json:"mac_address"` +} + +// NetAddress contains the ip address, subnet and gateway. +type NetAddress struct { + // IPNet of this NetAddress. Note that this is a subnet but it has to contain the + // actual ip of the network interface and not the network address. + IPNet IPNet `json:"ipnet"` + // Gateway for the network. This can be empty if there is no gateway, e.g. internal network. + Gateway net.IP `json:"gateway,omitempty"` +} + +// PerNetworkOptions are options which should be set on a per network basis. +type PerNetworkOptions struct { + // StaticIPs for this container. Optional. + StaticIPs []net.IP `json:"static_ips,omitempty"` + // Aliases contains a list of names which the dns server should resolve + // to this container. Should only be set when DNSEnabled is true on the Network. + // If aliases are set but there is no dns support for this network the + // network interface implementation should ignore this and NOT error. + // Optional. + Aliases []string `json:"aliases,omitempty"` + // StaticMac for this container. Optional. + StaticMAC HardwareAddr `json:"static_mac,omitempty"` + // InterfaceName for this container. Required in the backend. + // Optional in the frontend. Will be filled with ethX (where X is a integer) when empty. + InterfaceName string `json:"interface_name"` +} + +// NetworkOptions for a given container. +type NetworkOptions struct { + // ContainerID is the container id, used for iptables comments and ipam allocation. + ContainerID string `json:"container_id"` + // ContainerName is the container name, used as dns name. + ContainerName string `json:"container_name"` + // PortMappings contains the port mappings for this container + PortMappings []PortMapping `json:"port_mappings,omitempty"` + // Networks contains all networks with the PerNetworkOptions. + // The map should contain at least one element. + Networks map[string]PerNetworkOptions `json:"networks"` +} + +// PortMapping is one or more ports that will be mapped into the container. +type PortMapping struct { + // HostIP is the IP that we will bind to on the host. + // If unset, assumed to be 0.0.0.0 (all interfaces). + HostIP string `json:"host_ip"` + // ContainerPort is the port number that will be exposed from the + // container. + // Mandatory. 
+ ContainerPort uint16 `json:"container_port"` + // HostPort is the port number that will be forwarded from the host into + // the container. + // If omitted, a random port on the host (guaranteed to be over 1024) + // will be assigned. + HostPort uint16 `json:"host_port"` + // Range is the number of ports that will be forwarded, starting at + // HostPort and ContainerPort and counting up. + // This is 1-indexed, so 1 is assumed to be a single port (only the + // Hostport:Containerport mapping will be added), 2 is two ports (both + // Hostport:Containerport and Hostport+1:Containerport+1), etc. + // If unset, assumed to be 1 (a single port). + // Both hostport + range and containerport + range must be less than + // 65536. + Range uint16 `json:"range"` + // Protocol is the protocol forward. + // Must be either "tcp", "udp", and "sctp", or some combination of these + // separated by commas. + // If unset, assumed to be TCP. + Protocol string `json:"protocol"` +} + +// OCICNIPortMapping maps to the standard CNI portmapping Capability. +// Deprecated: Do not use this struct for new fields. This only exists +// for backwards compatibility. +type OCICNIPortMapping struct { + // HostPort is the port number on the host. + HostPort int32 `json:"hostPort"` + // ContainerPort is the port number inside the sandbox. + ContainerPort int32 `json:"containerPort"` + // Protocol is the protocol of the port mapping. + Protocol string `json:"protocol"` + // HostIP is the host ip to use. + HostIP string `json:"hostIP"` +} + +type SetupOptions struct { + NetworkOptions +} + +type TeardownOptions struct { + NetworkOptions +} + +// FilterFunc can be passed to NetworkList to filter the networks. +type FilterFunc func(Network) bool diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go new file mode 100644 index 00000000000..b27ca1f9aa6 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -0,0 +1,80 @@ +package util + +import ( + "strings" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/filters" + "github.com/containers/common/pkg/util" + "github.com/pkg/errors" +) + +func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { + filterFuncs := make([]types.FilterFunc, 0, len(f)) + for key, filterValues := range f { + filterFunc, err := createFilterFuncs(key, filterValues) + if err != nil { + return nil, err + } + filterFuncs = append(filterFuncs, filterFunc) + } + return filterFuncs, nil +} + +func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, error) { + switch strings.ToLower(key) { + case "name": + // matches one name, regex allowed + return func(net types.Network) bool { + return util.StringMatchRegexSlice(net.Name, filterValues) + }, nil + + case "driver": + // matches network driver + return func(net types.Network) bool { + return util.StringInSlice(net.Driver, filterValues) + }, nil + + case "id": + // matches part of one id + return func(net types.Network) bool { + return util.StringMatchRegexSlice(net.ID, filterValues) + }, nil + + // TODO: add dns enabled, internal filter + } + return createPruneFilterFuncs(key, filterValues) +} + +func GenerateNetworkPruneFilters(f map[string][]string) ([]types.FilterFunc, error) { + filterFuncs := make([]types.FilterFunc, 0, len(f)) + for key, filterValues := range f { + filterFunc, err := createPruneFilterFuncs(key, filterValues) + if err != nil { + return nil, err + 
} + filterFuncs = append(filterFuncs, filterFunc) + } + return filterFuncs, nil +} + +func createPruneFilterFuncs(key string, filterValues []string) (types.FilterFunc, error) { + switch strings.ToLower(key) { + case "label": + // matches all labels + return func(net types.Network) bool { + return filters.MatchLabelFilters(filterValues, net.Labels) + }, nil + + case "until": + until, err := filters.ComputeUntilTimestamp(filterValues) + if err != nil { + return nil, err + } + return func(net types.Network) bool { + return net.Created.Before(until) + }, nil + default: + return nil, errors.Errorf("invalid filter %q", key) + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip.go b/vendor/github.com/containers/common/libnetwork/util/ip.go new file mode 100644 index 00000000000..7c315e31293 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip.go @@ -0,0 +1,56 @@ +package util + +import ( + "net" +) + +// IsIPv6 returns true if netIP is IPv6. +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +// IsIPv4 returns true if netIP is IPv4. +func IsIPv4(netIP net.IP) bool { + return netIP != nil && netIP.To4() != nil +} + +// LastIPInSubnet gets the last IP in a subnet +func LastIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + for i := range cidr.IP { + cidr.IP[i] |= ^cidr.Mask[i] + } + return cidr.IP, nil +} + +// FirstIPInSubnet gets the first IP in a subnet +func FirstIPInSubnet(addr *net.IPNet) (net.IP, error) { //nolint:interfacer + // re-parse to ensure clean network address + _, cidr, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, err + } + ones, bits := cidr.Mask.Size() + if ones == bits { + return cidr.IP, nil + } + cidr.IP[len(cidr.IP)-1]++ + return cidr.IP, nil +} + +// NormalizeIP will transform the given ip to the 4 byte len ipv4 if possible +func NormalizeIP(ip *net.IP) { + ipv4 := ip.To4() + if ipv4 != nil { + *ip = ipv4 + } +} diff --git a/vendor/github.com/containers/common/libnetwork/util/ip_calc.go b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go new file mode 100644 index 00000000000..a27ddf78bf7 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/util/ip_calc.go @@ -0,0 +1,53 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1 +func NextIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Add(i, big.NewInt(1))) +} + +// PrevIP returns IP decremented by 1 +func PrevIP(ip net.IP) net.IP { + i := ipToInt(ip) + return intToIP(i.Sub(i, big.NewInt(1))) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +func Cmp(a, b net.IP) int { + aa := ipToInt(a) + bb := ipToInt(b) + return aa.Cmp(bb) +} + +func ipToInt(ip net.IP) *big.Int { + if v := ip.To4(); v != nil { + return big.NewInt(0).SetBytes(v) + } + return big.NewInt(0).SetBytes(ip.To16()) +} + +func intToIP(i *big.Int) net.IP { + return net.IP(i.Bytes()) +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio.go b/vendor/github.com/containers/common/pkg/cgroups/blkio.go new file mode 100644 index 00000000000..bacd4eb9362 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio.go @@ -0,0 +1,149 @@ +package cgroups + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type blkioHandler struct { +} + +func getBlkioHandler() *blkioHandler { + return &blkioHandler{} +} + +// Apply set the specified constraints +func (c *blkioHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.BlockIO == nil { + return nil + } + return fmt.Errorf("blkio apply function not implemented yet") +} + +// Create the cgroup +func (c *blkioHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Blkio) +} + +// Destroy the cgroup +func (c *blkioHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Blkio)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *blkioHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var ioServiceBytesRecursive []BlkIOEntry + + if ctr.cgroup2 { + // more details on the io.stat file format:X https://facebookmicrosites.github.io/cgroup2/docs/io-controller.html + values, err := readCgroup2MapFile(ctr, "io.stat") + if err != nil { + return err + } + for k, v := range values { + d := strings.Split(k, ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + for _, item := range v { + d := strings.Split(item, "=") + if len(d) != 2 { + continue + } + op := d[0] + + // Accommodate the cgroup v1 naming + switch op { + case "rbytes": + op = "read" + case "wbytes": + op = "write" + } + + value, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + entry := BlkIOEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + } + } else { + BlkioRoot := ctr.getCgroupv1Path(Blkio) + + p := filepath.Join(BlkioRoot, "blkio.throttle.io_service_bytes_recursive") + f, err := os.Open(p) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "open %s", p) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 3 { + continue + } + d := strings.Split(parts[0], ":") + if len(d) != 2 { + continue + } + minor, err := strconv.ParseUint(d[0], 10, 0) + if 
err != nil { + return err + } + major, err := strconv.ParseUint(d[1], 10, 0) + if err != nil { + return err + } + + op := parts[1] + + value, err := strconv.ParseUint(parts[2], 10, 0) + if err != nil { + return err + } + entry := BlkIOEntry{ + Op: op, + Major: major, + Minor: minor, + Value: value, + } + ioServiceBytesRecursive = append(ioServiceBytesRecursive, entry) + } + if err := scanner.Err(); err != nil { + return errors.Wrapf(err, "parse %s", p) + } + } + m.Blkio = BlkioMetrics{IoServiceBytesRecursive: ioServiceBytesRecursive} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go new file mode 100644 index 00000000000..0bf275f3851 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go @@ -0,0 +1,671 @@ +package cgroups + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containers/storage/pkg/unshare" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // ErrCgroupDeleted means the cgroup was deleted + ErrCgroupDeleted = errors.New("cgroup deleted") + // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment + ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") + ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") +) + +// CgroupControl controls a cgroup hierarchy +type CgroupControl struct { + cgroup2 bool + path string + systemd bool + // List of additional cgroup subsystems joined that + // do not have a custom handler. 
+ additionalControllers []controller +} + +// CPUUsage keeps stats for the CPU usage (unit: nanoseconds) +type CPUUsage struct { + Kernel uint64 + Total uint64 + PerCPU []uint64 +} + +// MemoryUsage keeps stats for the memory usage +type MemoryUsage struct { + Usage uint64 + Limit uint64 +} + +// CPUMetrics keeps stats for the CPU usage +type CPUMetrics struct { + Usage CPUUsage +} + +// BlkIOEntry describes an entry in the blkio stats +type BlkIOEntry struct { + Op string + Major uint64 + Minor uint64 + Value uint64 +} + +// BlkioMetrics keeps usage stats for the blkio cgroup controller +type BlkioMetrics struct { + IoServiceBytesRecursive []BlkIOEntry +} + +// MemoryMetrics keeps usage stats for the memory cgroup controller +type MemoryMetrics struct { + Usage MemoryUsage +} + +// PidsMetrics keeps usage stats for the pids cgroup controller +type PidsMetrics struct { + Current uint64 +} + +// Metrics keeps usage stats for the cgroup controllers +type Metrics struct { + CPU CPUMetrics + Blkio BlkioMetrics + Memory MemoryMetrics + Pids PidsMetrics +} + +type controller struct { + name string + symlink bool +} + +type controllerHandler interface { + Create(*CgroupControl) (bool, error) + Apply(*CgroupControl, *spec.LinuxResources) error + Destroy(*CgroupControl) error + Stat(*CgroupControl, *Metrics) error +} + +const ( + cgroupRoot = "/sys/fs/cgroup" + // CPU is the cpu controller + CPU = "cpu" + // CPUAcct is the cpuacct controller + CPUAcct = "cpuacct" + // CPUset is the cpuset controller + CPUset = "cpuset" + // Memory is the memory controller + Memory = "memory" + // Pids is the pids controller + Pids = "pids" + // Blkio is the blkio controller + Blkio = "blkio" +) + +var handlers map[string]controllerHandler + +func init() { + handlers = make(map[string]controllerHandler) + handlers[CPU] = getCPUHandler() + handlers[CPUset] = getCpusetHandler() + handlers[Memory] = getMemoryHandler() + handlers[Pids] = getPidsHandler() + handlers[Blkio] = getBlkioHandler() +} + +// getAvailableControllers get the available controllers +func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]controller, error) { + if cgroup2 { + controllers := []controller{} + controllersFile := cgroupRoot + "/cgroup.controllers" + // rootless cgroupv2: check available controllers for current user, systemd or servicescope will inherit + if unshare.IsRootless() { + userSlice, err := getCgroupPathForCurrentProcess() + if err != nil { + return controllers, err + } + // userSlice already contains '/' so not adding here + basePath := cgroupRoot + userSlice + controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) + } + controllersFileBytes, err := ioutil.ReadFile(controllersFile) + if err != nil { + return nil, errors.Wrapf(err, "failed while reading controllers for cgroup v2 from %q", controllersFile) + } + for _, controllerName := range strings.Fields(string(controllersFileBytes)) { + c := controller{ + name: controllerName, + symlink: false, + } + controllers = append(controllers, c) + } + return controllers, nil + } + + subsystems, _ := cgroupV1GetAllSubsystems() + controllers := []controller{} + // cgroupv1 and rootless: No subsystem is available: delegation is unsafe. 
+ if unshare.IsRootless() { + return controllers, nil + } + + for _, name := range subsystems { + if _, found := exclude[name]; found { + continue + } + fileInfo, err := os.Stat(cgroupRoot + "/" + name) + if err != nil { + continue + } + c := controller{ + name: name, + symlink: !fileInfo.IsDir(), + } + controllers = append(controllers, c) + } + + return controllers, nil +} + +// GetAvailableControllers get string:bool map of all the available controllers +func GetAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) ([]string, error) { + availableControllers, err := getAvailableControllers(exclude, cgroup2) + if err != nil { + return nil, err + } + controllerList := []string{} + for _, controller := range availableControllers { + controllerList = append(controllerList, controller.name) + } + + return controllerList, nil +} + +func cgroupV1GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + if err := s.Err(); err != nil { + return nil, err + } + return subsystems, nil +} + +func getCgroupPathForCurrentProcess() (string, error) { + path := fmt.Sprintf("/proc/%d/cgroup", os.Getpid()) + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + + cgroupPath := "" + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + procEntries := strings.SplitN(text, "::", 2) + // set process cgroupPath only if entry is valid + if len(procEntries) > 1 { + cgroupPath = procEntries[1] + } + } + if err := s.Err(); err != nil { + return cgroupPath, err + } + return cgroupPath, nil +} + +// getCgroupv1Path is a helper function to get the cgroup v1 path +func (c *CgroupControl) getCgroupv1Path(name string) string { + return filepath.Join(cgroupRoot, name, c.path) +} + +// createCgroupv2Path creates the cgroupv2 path and enables all the available controllers +func createCgroupv2Path(path string) (deferredError error) { + if !strings.HasPrefix(path, cgroupRoot+"/") { + return fmt.Errorf("invalid cgroup path %s", path) + } + content, err := ioutil.ReadFile(cgroupRoot + "/cgroup.controllers") + if err != nil { + return err + } + ctrs := bytes.Fields(content) + res := append([]byte("+"), bytes.Join(ctrs, []byte(" +"))...) + + current := "/sys/fs" + elements := strings.Split(path, "/") + for i, e := range elements[3:] { + current = filepath.Join(current, e) + if i > 0 { + if err := os.Mkdir(current, 0755); err != nil { + if !os.IsExist(err) { + return err + } + } else { + // If the directory was created, be sure it is not left around on errors. + defer func() { + if deferredError != nil { + os.Remove(current) + } + }() + } + } + // We enable the controllers for all the path components except the last one. It is not allowed to add + // PIDs if there are already enabled controllers. 
+ if i < len(elements[3:])-1 { + if err := ioutil.WriteFile(filepath.Join(current, "cgroup.subtree_control"), res, 0755); err != nil { + return err + } + } + } + return nil +} + +// initialize initializes the specified hierarchy +func (c *CgroupControl) initialize() (err error) { + createdSoFar := map[string]controllerHandler{} + defer func() { + if err != nil { + for name, ctr := range createdSoFar { + if err := ctr.Destroy(c); err != nil { + logrus.Warningf("error cleaning up controller %s for %s", name, c.path) + } + } + } + }() + if c.cgroup2 { + if err := createCgroupv2Path(filepath.Join(cgroupRoot, c.path)); err != nil { + return errors.Wrapf(err, "error creating cgroup path %s", c.path) + } + } + for name, handler := range handlers { + created, err := handler.Create(c) + if err != nil { + return err + } + if created { + createdSoFar[name] = handler + } + } + + if !c.cgroup2 { + // We won't need to do this for cgroup v2 + for _, ctr := range c.additionalControllers { + if ctr.symlink { + continue + } + path := c.getCgroupv1Path(ctr.name) + if err := os.MkdirAll(path, 0755); err != nil { + return errors.Wrapf(err, "error creating cgroup path for %s", ctr.name) + } + } + } + + return nil +} + +func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { + cPath := c.getCgroupv1Path(controller) + _, err := os.Stat(cPath) + if err == nil { + return false, nil + } + + if !os.IsNotExist(err) { + return false, err + } + + if err := os.MkdirAll(cPath, 0755); err != nil { + return false, errors.Wrapf(err, "error creating cgroup for %s", controller) + } + return true, nil +} + +func readFileAsUint64(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + v := cleanString(string(data)) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil +} + +func readFileByKeyAsUint64(path, key string) (uint64, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + for _, line := range strings.Split(string(content), "\n") { + fields := strings.SplitN(line, " ", 2) + if fields[0] == key { + v := cleanString(string(fields[1])) + if v == "max" { + return math.MaxUint64, nil + } + ret, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return ret, errors.Wrapf(err, "parse %s from %s", v, path) + } + return ret, nil + } + } + + return 0, fmt.Errorf("no key named %s from %s", key, path) +} + +// New creates a new cgroup control +func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + } + + if !cgroup2 { + controllers, err := getAvailableControllers(handlers, false) + if err != nil { + return nil, err + } + control.additionalControllers = controllers + } + + if err := control.initialize(); err != nil { + return nil, err + } + + return control, nil +} + +// NewSystemd creates a new cgroup control +func NewSystemd(path string) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + systemd: true, + } + return control, nil +} + +// Load loads an existing cgroup control +func Load(path string) (*CgroupControl, error) { + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return nil, 
err + } + control := &CgroupControl{ + cgroup2: cgroup2, + path: path, + systemd: false, + } + if !cgroup2 { + controllers, err := getAvailableControllers(handlers, false) + if err != nil { + return nil, err + } + control.additionalControllers = controllers + } + if !cgroup2 { + oneExists := false + // check that the cgroup exists at least under one controller + for name := range handlers { + p := control.getCgroupv1Path(name) + if _, err := os.Stat(p); err == nil { + oneExists = true + break + } + } + + // if there is no controller at all, raise an error + if !oneExists { + if unshare.IsRootless() { + return nil, ErrCgroupV1Rootless + } + // compatible with the error code + // used by containerd/cgroups + return nil, ErrCgroupDeleted + } + } + return control, nil +} + +// CreateSystemdUnit creates the systemd cgroup +func (c *CgroupControl) CreateSystemdUnit(path string) error { + if !c.systemd { + return fmt.Errorf("the cgroup controller is not using systemd") + } + + conn, err := systemdDbus.NewWithContext(context.TODO()) + if err != nil { + return err + } + defer conn.Close() + + return systemdCreate(path, conn) +} + +// GetUserConnection returns an user connection to D-BUS +func GetUserConnection(uid int) (*systemdDbus.Conn, error) { + return systemdDbus.NewConnection(func() (*dbus.Conn, error) { + return dbusAuthConnection(uid, dbus.SessionBusPrivate) + }) +} + +// CreateSystemdUserUnit creates the systemd cgroup for the specified user +func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { + if !c.systemd { + return fmt.Errorf("the cgroup controller is not using systemd") + } + + conn, err := GetUserConnection(uid) + if err != nil { + return err + } + defer conn.Close() + + return systemdCreate(path, conn) +} + +func dbusAuthConnection(uid int, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + if err := conn.Hello(); err != nil { + return nil, err + } + + return conn, nil +} + +// Delete cleans a cgroup +func (c *CgroupControl) Delete() error { + return c.DeleteByPath(c.path) +} + +// DeleteByPathConn deletes the specified cgroup path using the specified +// dbus connection if needed. 
+func (c *CgroupControl) DeleteByPathConn(path string, conn *systemdDbus.Conn) error { + if c.systemd { + return systemdDestroyConn(path, conn) + } + if c.cgroup2 { + return rmDirRecursively(filepath.Join(cgroupRoot, c.path)) + } + var lastError error + for _, h := range handlers { + if err := h.Destroy(c); err != nil { + lastError = err + } + } + + for _, ctr := range c.additionalControllers { + if ctr.symlink { + continue + } + p := c.getCgroupv1Path(ctr.name) + if err := rmDirRecursively(p); err != nil { + lastError = errors.Wrapf(err, "remove %s", p) + } + } + return lastError +} + +// DeleteByPath deletes the specified cgroup path +func (c *CgroupControl) DeleteByPath(path string) error { + if c.systemd { + conn, err := systemdDbus.NewWithContext(context.TODO()) + if err != nil { + return err + } + defer conn.Close() + return c.DeleteByPathConn(path, conn) + } + return c.DeleteByPathConn(path, nil) +} + +// Update updates the cgroups +func (c *CgroupControl) Update(resources *spec.LinuxResources) error { + for _, h := range handlers { + if err := h.Apply(c, resources); err != nil { + return err + } + } + return nil +} + +// AddPid moves the specified pid to the cgroup +func (c *CgroupControl) AddPid(pid int) error { + pidString := []byte(fmt.Sprintf("%d\n", pid)) + + if c.cgroup2 { + p := filepath.Join(cgroupRoot, c.path, "cgroup.procs") + if err := ioutil.WriteFile(p, pidString, 0644); err != nil { + return errors.Wrapf(err, "write %s", p) + } + return nil + } + + names := make([]string, 0, len(handlers)) + for n := range handlers { + names = append(names, n) + } + + for _, c := range c.additionalControllers { + if !c.symlink { + names = append(names, c.name) + } + } + + for _, n := range names { + // If we aren't using cgroup2, we won't write correctly to unified hierarchy + if !c.cgroup2 && n == "unified" { + continue + } + p := filepath.Join(c.getCgroupv1Path(n), "tasks") + if err := ioutil.WriteFile(p, pidString, 0644); err != nil { + return errors.Wrapf(err, "write %s", p) + } + } + return nil +} + +// Stat returns usage statistics for the cgroup +func (c *CgroupControl) Stat() (*Metrics, error) { + m := Metrics{} + found := false + for _, h := range handlers { + if err := h.Stat(c, &m); err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return nil, err + } + logrus.Warningf("Failed to retrieve cgroup stats: %v", err) + continue + } + found = true + } + if !found { + return nil, ErrStatCgroup + } + return &m, nil +} + +func readCgroup2MapPath(path string) (map[string][]string, error) { + ret := map[string][]string{} + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return ret, nil + } + return nil, errors.Wrapf(err, "open file %s", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + ret[parts[0]] = parts[1:] + } + if err := scanner.Err(); err != nil { + return nil, errors.Wrapf(err, "parsing file %s", path) + } + return ret, nil +} + +func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { + p := filepath.Join(cgroupRoot, ctr.path, name) + + return readCgroup2MapPath(p) +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go new file mode 100644 index 00000000000..c1fe194b23a --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go @@ -0,0 +1,129 @@ +// +build linux 
+ +package cgroups + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +var ( + isUnifiedOnce sync.Once + isUnified bool + isUnifiedErr error +) + +// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. +func IsCgroup2UnifiedMode() (bool, error) { + isUnifiedOnce.Do(func() { + var st syscall.Statfs_t + if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + isUnified, isUnifiedErr = false, err + } else { + isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil + } + }) + return isUnified, isUnifiedErr +} + +// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the +// current cgroup. +func UserOwnsCurrentSystemdCgroup() (bool, error) { + uid := os.Geteuid() + + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return false, err + } + + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return false, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + + if len(parts) < 3 { + continue + } + + var cgroupPath string + + if cgroup2 { + cgroupPath = filepath.Join(cgroupRoot, parts[2]) + } else { + if parts[1] != "name=systemd" { + continue + } + cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) + } + + st, err := os.Stat(cgroupPath) + if err != nil { + return false, err + } + s := st.Sys() + if s == nil { + return false, fmt.Errorf("error stat cgroup path %s", cgroupPath) + } + + if int(s.(*syscall.Stat_t).Uid) != uid { + return false, nil + } + } + if err := scanner.Err(); err != nil { + return false, errors.Wrapf(err, "parsing file /proc/self/cgroup") + } + return true, nil +} + +// rmDirRecursively delete recursively a cgroup directory. +// It differs from os.RemoveAll as it doesn't attempt to unlink files. +// On cgroupfs we are allowed only to rmdir empty directories. +func rmDirRecursively(path string) error { + if err := os.Remove(path); err == nil || os.IsNotExist(err) { + return nil + } + entries, err := ioutil.ReadDir(path) + if err != nil { + return err + } + for _, i := range entries { + if i.IsDir() { + if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { + return err + } + } + } + + attempts := 0 + for { + err := os.Remove(path) + if err == nil || os.IsNotExist(err) { + return nil + } + if errors.Is(err, unix.EBUSY) { + // attempt up to 5 seconds if the cgroup is busy + if attempts < 500 { + time.Sleep(time.Millisecond * 10) + attempts++ + continue + } + } + return errors.Wrapf(err, "remove %s", path) + } +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go new file mode 100644 index 00000000000..95d42417021 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go @@ -0,0 +1,22 @@ +// +build !linux + +package cgroups + +import ( + "os" +) + +// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. +func IsCgroup2UnifiedMode() (bool, error) { + return false, nil +} + +// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the +// current cgroup. 
+func UserOwnsCurrentSystemdCgroup() (bool, error) { + return false, nil +} + +func rmDirRecursively(path string) error { + return os.RemoveAll(path) +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu.go b/vendor/github.com/containers/common/pkg/cgroups/cpu.go new file mode 100644 index 00000000000..23539757d23 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cpu.go @@ -0,0 +1,160 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type cpuHandler struct { +} + +func getCPUHandler() *cpuHandler { + return &cpuHandler{} +} + +func cleanString(s string) string { + return strings.Trim(s, "\n") +} + +func readAcct(ctr *CgroupControl, name string) (uint64, error) { + p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) + return readFileAsUint64(p) +} + +func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { + p := filepath.Join(ctr.getCgroupv1Path(CPUAcct), name) + data, err := ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "reading %s", p) + } + r := []uint64{} + for _, s := range strings.Split(string(data), " ") { + s = cleanString(s) + if s == "" { + break + } + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing %s", s) + } + r = append(r, v) + } + return r, nil +} + +// Apply set the specified constraints +func (c *cpuHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.CPU == nil { + return nil + } + return fmt.Errorf("cpu apply not implemented yet") +} + +// Create the cgroup +func (c *cpuHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(CPU) +} + +// Destroy the cgroup +func (c *cpuHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(CPU)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *cpuHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var err error + usage := CPUUsage{} + if ctr.cgroup2 { + values, err := readCgroup2MapFile(ctr, "cpu.stat") + if err != nil { + return err + } + if val, found := values["usage_usec"]; found { + usage.Total, err = strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return err + } + usage.Kernel *= 1000 + } + if val, found := values["system_usec"]; found { + usage.Kernel, err = strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return err + } + usage.Total *= 1000 + } + // FIXME: How to read usage.PerCPU? 
+ } else { + usage.Total, err = readAcct(ctr, "cpuacct.usage") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.Total = 0 + } + usage.Kernel, err = readAcct(ctr, "cpuacct.usage_sys") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.Kernel = 0 + } + usage.PerCPU, err = readAcctList(ctr, "cpuacct.usage_percpu") + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return err + } + usage.PerCPU = nil + } + } + m.CPU = CPUMetrics{Usage: usage} + return nil +} + +// GetSystemCPUUsage returns the system usage for all the cgroups +func GetSystemCPUUsage() (uint64, error) { + cgroupv2, err := IsCgroup2UnifiedMode() + if err != nil { + return 0, err + } + if !cgroupv2 { + p := filepath.Join(cgroupRoot, CPUAcct, "cpuacct.usage") + return readFileAsUint64(p) + } + + files, err := ioutil.ReadDir(cgroupRoot) + if err != nil { + return 0, err + } + var total uint64 + for _, file := range files { + if !file.IsDir() { + continue + } + p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") + + values, err := readCgroup2MapPath(p) + if err != nil { + return 0, err + } + + if val, found := values["usage_usec"]; found { + v, err := strconv.ParseUint(cleanString(val[0]), 10, 64) + if err != nil { + return 0, err + } + total += v * 1000 + } + } + return total, nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go new file mode 100644 index 00000000000..22ac0a07968 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset.go @@ -0,0 +1,85 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + spec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type cpusetHandler struct { +} + +func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { + if dir == cgroupRoot { + return nil, fmt.Errorf("could not find parent to initialize cpuset %s", file) + } + path := filepath.Join(dir, file) + parentPath := path + if cgroupv2 { + parentPath = fmt.Sprintf("%s.effective", parentPath) + } + data, err := ioutil.ReadFile(parentPath) + if err != nil { + return nil, errors.Wrapf(err, "open %s", path) + } + if strings.Trim(string(data), "\n") != "" { + return data, nil + } + data, err = cpusetCopyFileFromParent(filepath.Dir(dir), file, cgroupv2) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, data, 0644); err != nil { + return nil, errors.Wrapf(err, "write %s", path) + } + return data, nil +} + +func cpusetCopyFromParent(path string, cgroupv2 bool) error { + for _, file := range []string{"cpuset.cpus", "cpuset.mems"} { + if _, err := cpusetCopyFileFromParent(path, file, cgroupv2); err != nil { + return err + } + } + return nil +} + +func getCpusetHandler() *cpusetHandler { + return &cpusetHandler{} +} + +// Apply set the specified constraints +func (c *cpusetHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.CPU == nil { + return nil + } + return fmt.Errorf("cpuset apply not implemented yet") +} + +// Create the cgroup +func (c *cpusetHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + path := filepath.Join(cgroupRoot, ctr.path) + return true, cpusetCopyFromParent(path, true) + } + + created, err := ctr.createCgroupDirectory(CPUset) + if !created || err != nil { + return created, err + } + return true, cpusetCopyFromParent(ctr.getCgroupv1Path(CPUset), false) +} + +// Destroy the cgroup +func (c 
*cpusetHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(CPUset)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *cpusetHandler) Stat(ctr *CgroupControl, m *Metrics) error { + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory.go b/vendor/github.com/containers/common/pkg/cgroups/memory.go new file mode 100644 index 00000000000..10d65893c6c --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/memory.go @@ -0,0 +1,66 @@ +package cgroups + +import ( + "fmt" + "path/filepath" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +type memHandler struct{} + +func getMemoryHandler() *memHandler { + return &memHandler{} +} + +// Apply set the specified constraints +func (c *memHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.Memory == nil { + return nil + } + return fmt.Errorf("memory apply not implemented yet") +} + +// Create the cgroup +func (c *memHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Memory) +} + +// Destroy the cgroup +func (c *memHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Memory)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *memHandler) Stat(ctr *CgroupControl, m *Metrics) error { + var err error + usage := MemoryUsage{} + + var memoryRoot string + var limitFilename string + + if ctr.cgroup2 { + memoryRoot = filepath.Join(cgroupRoot, ctr.path) + limitFilename = "memory.max" + if usage.Usage, err = readFileByKeyAsUint64(filepath.Join(memoryRoot, "memory.stat"), "anon"); err != nil { + return err + } + } else { + memoryRoot = ctr.getCgroupv1Path(Memory) + limitFilename = "memory.limit_in_bytes" + if usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil { + return err + } + } + + usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) + if err != nil { + return err + } + + m.Memory = MemoryMetrics{Usage: usage} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids.go b/vendor/github.com/containers/common/pkg/cgroups/pids.go new file mode 100644 index 00000000000..58cb32b3ba3 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/pids.go @@ -0,0 +1,69 @@ +package cgroups + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + spec "github.com/opencontainers/runtime-spec/specs-go" +) + +type pidHandler struct { +} + +func getPidsHandler() *pidHandler { + return &pidHandler{} +} + +// Apply set the specified constraints +func (c *pidHandler) Apply(ctr *CgroupControl, res *spec.LinuxResources) error { + if res.Pids == nil { + return nil + } + var PIDRoot string + + if ctr.cgroup2 { + PIDRoot = filepath.Join(cgroupRoot, ctr.path) + } else { + PIDRoot = ctr.getCgroupv1Path(Pids) + } + + p := filepath.Join(PIDRoot, "pids.max") + return ioutil.WriteFile(p, []byte(fmt.Sprintf("%d\n", res.Pids.Limit)), 0644) +} + +// Create the cgroup +func (c *pidHandler) Create(ctr *CgroupControl) (bool, error) { + if ctr.cgroup2 { + return false, nil + } + return ctr.createCgroupDirectory(Pids) +} + +// Destroy the cgroup +func (c *pidHandler) Destroy(ctr *CgroupControl) error { + return rmDirRecursively(ctr.getCgroupv1Path(Pids)) +} + +// Stat fills a metrics structure with usage stats for the controller +func (c *pidHandler) Stat(ctr *CgroupControl, m *Metrics) error { 
+ if ctr.path == "" { + // nothing we can do to retrieve the pids.current path + return nil + } + + var PIDRoot string + if ctr.cgroup2 { + PIDRoot = filepath.Join(cgroupRoot, ctr.path) + } else { + PIDRoot = ctr.getCgroupv1Path(Pids) + } + + current, err := readFileAsUint64(filepath.Join(PIDRoot, "pids.current")) + if err != nil { + return err + } + + m.Pids = PidsMetrics{Current: current} + return nil +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go new file mode 100644 index 00000000000..92065a2d7cd --- /dev/null +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd.go @@ -0,0 +1,80 @@ +package cgroups + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" +) + +func systemdCreate(path string, c *systemdDbus.Conn) error { + slice, name := filepath.Split(path) + slice = strings.TrimSuffix(slice, "/") + + var lastError error + for i := 0; i < 2; i++ { + properties := []systemdDbus.Property{ + systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), + systemdDbus.PropWants(slice), + } + pMap := map[string]bool{ + "DefaultDependencies": false, + "MemoryAccounting": true, + "CPUAccounting": true, + "BlockIOAccounting": true, + } + if i == 0 { + pMap["Delegate"] = true + } + for k, v := range pMap { + p := systemdDbus.Property{ + Name: k, + Value: dbus.MakeVariant(v), + } + properties = append(properties, p) + } + + ch := make(chan string) + _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) + if err != nil { + lastError = err + continue + } + <-ch + return nil + } + return lastError +} + +/* + systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that + has the following license: + + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +func systemdDestroyConn(path string, c *systemdDbus.Conn) error { + name := filepath.Base(path) + + ch := make(chan string) + _, err := c.StopUnitContext(context.TODO(), name, "replace", ch) + if err != nil { + return err + } + <-ch + return nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index d5be77edd33..dd30abcd632 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -48,6 +48,18 @@ const ( BoltDBStateStore RuntimeStateStore = iota ) +// ProxyEnv is a list of Proxy Environment variables +var ProxyEnv = []string{ + "http_proxy", + "https_proxy", + "ftp_proxy", + "no_proxy", + "HTTP_PROXY", + "HTTPS_PROXY", + "FTP_PROXY", + "NO_PROXY", +} + // Config contains configuration options for container tools type Config struct { // Containers specify settings that configure how containers will run ont the system @@ -60,6 +72,8 @@ type Config struct { Network NetworkConfig `toml:"network"` // Secret section defines configurations for the secret management Secrets SecretConfig `toml:"secrets"` + // ConfigMap section defines configurations for the configmaps management + ConfigMaps ConfigMapConfig `toml:"configmaps"` } // ContainersConfig represents the "containers" TOML config table @@ -167,11 +181,6 @@ type ContainersConfig struct { // performance implications. PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"` - // RootlessNetworking depicts the "kind" of networking for rootless - // containers. Valid options are `slirp4netns` and `cni`. Default is - // `slirp4netns` on Linux, and `cni` on non-Linux OSes. - RootlessNetworking string `toml:"rootless_networking,omitempty"` - // SeccompProfile is the seccomp.json profile path which is used as the // default for the runtime. SeccompProfile string `toml:"seccomp_profile,omitempty"` @@ -215,6 +224,12 @@ type EngineConfig struct { // The first path pointing to a valid file will be used. ConmonPath []string `toml:"conmon_path,omitempty"` + // CompatAPIEnforceDockerHub enforces using docker.io for completing + // short names in Podman's compatibility REST API. Note that this will + // ignore unqualified-search-registries and short-name aliases defined + // in containers-registries.conf(5). + CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"` + // DetachKeys is the sequence of keys used to detach a container. DetachKeys string `toml:"detach_keys,omitempty"` @@ -414,6 +429,9 @@ type EngineConfig struct { // ChownCopiedFiles tells the container engine whether to chown files copied // into a container to the container's primary uid/gid. ChownCopiedFiles bool `toml:"chown_copied_files,omitempty"` + + // CompressionFormat is the compression format used to compress image layers. + CompressionFormat string `toml:"compression_format,omitempty"` } // SetOptions contains a subset of options in a Config. It's used to indicate if @@ -461,6 +479,10 @@ type SetOptions struct { // NetworkConfig represents the "network" TOML config table type NetworkConfig struct { + // NetworkBackend determines what backend should be used for Podman's + // networking. + NetworkBackend string `toml:"network_backend,omitempty"` + // CNIPluginDirs is where CNI plugin binaries are stored. 
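The new exported ProxyEnv list in config.go enumerates the conventional HTTP/HTTPS/FTP proxy variable names in both lower- and upper-case forms. The sketch below shows how a caller might forward only those variables that are actually set on the host into a child environment; the helper name is illustrative and not part of the package.

```go
package main

import (
	"fmt"
	"os"
)

// proxyEnv mirrors the variable names exported as config.ProxyEnv.
var proxyEnv = []string{
	"http_proxy", "https_proxy", "ftp_proxy", "no_proxy",
	"HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY",
}

// collectProxyEnv returns KEY=value pairs for the proxy variables that are
// set on the host, leaving unset names out entirely.
func collectProxyEnv() []string {
	var env []string
	for _, name := range proxyEnv {
		if val, ok := os.LookupEnv(name); ok {
			env = append(env, fmt.Sprintf("%s=%s", name, val))
		}
	}
	return env
}

func main() {
	fmt.Println(collectProxyEnv())
}
```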
CNIPluginDirs []string `toml:"cni_plugin_dirs,omitempty"` @@ -489,6 +511,17 @@ type SecretConfig struct { Opts map[string]string `toml:"opts,omitempty"` } +// ConfigMapConfig represents the "configmap" TOML config table +type ConfigMapConfig struct { + // Driver specifies the configmap driver to use. + // Current valid value: + // * file + // * pass + Driver string `toml:"driver,omitempty"` + // Opts contains driver specific options + Opts map[string]string `toml:"opts,omitempty"` +} + // MachineConfig represents the "machine" TOML config table type MachineConfig struct { // Number of CPU's a machine is created with. @@ -499,6 +532,8 @@ type MachineConfig struct { Image string `toml:"image,omitempty"` // Memory in MB a machine is created with. Memory uint64 `toml:"memory,omitempty,omitzero"` + // Username to use for rootless podman when init-ing a podman machine VM + User string `toml:"user,omitempty"` } // Destination represents destination for remote service @@ -559,6 +594,10 @@ func NewConfig(userConfigPath string) (*Config, error) { return nil, err } + if err := config.setupEnv(); err != nil { + return nil, err + } + return config, nil } @@ -574,7 +613,7 @@ func readConfigFromFile(path string, config *Config) error { } keys := meta.Undecoded() if len(keys) > 0 { - logrus.Warningf("Failed to decode the keys %q from %q.", keys, path) + logrus.Debugf("Failed to decode the keys %q from %q.", keys, path) } return nil @@ -791,21 +830,6 @@ func (c *ContainersConfig) Validate() error { // execution checks. It returns an `error` on validation failure, otherwise // `nil`. func (c *NetworkConfig) Validate() error { - expectedConfigDir := _cniConfigDir - if unshare.IsRootless() { - home, err := unshare.HomeDir() - if err != nil { - return err - } - expectedConfigDir = filepath.Join(home, _cniConfigDirRootless) - } - if c.NetworkConfigDir != expectedConfigDir { - err := isDirectory(c.NetworkConfigDir) - if err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "invalid network_config_dir: %s", c.NetworkConfigDir) - } - } - if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) { return nil } @@ -878,8 +902,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { if envHost { env = append(env, os.Environ()...) } else if httpProxy { - proxy := []string{"http_proxy", "https_proxy", "ftp_proxy", "no_proxy", "HTTP_PROXY", "HTTPS_PROXY", "FTP_PROXY", "NO_PROXY"} - for _, p := range proxy { + for _, p := range ProxyEnv { if val, ok := os.LookupEnv(p); ok { env = append(env, fmt.Sprintf("%s=%s", p, val)) } @@ -1142,7 +1165,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) { // FindHelperBinary will search the given binary name in the configured directories. // If searchPATH is set to true it will also search in $PATH. func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) { - for _, path := range c.Engine.HelperBinariesDir { + dir_list := c.Engine.HelperBinariesDir + + // If set, search this directory first. This is used in testing. + if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found { + dir_list = append([]string{dir}, dir_list...) 
+ } + + for _, path := range dir_list { fullpath := filepath.Join(path, name) if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { return fullpath, nil @@ -1151,13 +1181,14 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) if searchPATH { return exec.LookPath(name) } + configHint := "To resolve this error, set the helper_binaries_dir key in the `[engine]` section of containers.conf to the directory containing your helper binaries." if len(c.Engine.HelperBinariesDir) == 0 { - return "", errors.Errorf("could not find %q because there are no helper binary directories configured", name) + return "", errors.Errorf("could not find %q because there are no helper binary directories configured. %s", name, configHint) } - return "", errors.Errorf("could not find %q in one of %v", name, c.Engine.HelperBinariesDir) + return "", errors.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint) } -// ImageCopyTmpDir default directory to store tempory image files during copy +// ImageCopyTmpDir default directory to store temporary image files during copy func (c *Config) ImageCopyTmpDir() (string, error) { if path, found := os.LookupEnv("TMPDIR"); found { return path, nil @@ -1175,3 +1206,23 @@ func (c *Config) ImageCopyTmpDir() (string, error) { return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir) } + +// setupEnv sets the environment variables for the engine +func (c *Config) setupEnv() error { + for _, env := range c.Engine.Env { + splitEnv := strings.SplitN(env, "=", 2) + if len(splitEnv) != 2 { + logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) + continue + } + // skip if the env is already defined + if _, ok := os.LookupEnv(splitEnv[0]); ok { + logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0]) + continue + } + if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 1d3c003e304..f497d2bbe35 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -197,10 +197,6 @@ default_sysctls = [ # #prepare_volume_on_create = false -# Indicates the networking to be used for rootless containers -# -#rootless_networking = "slirp4netns" - # Path to the seccomp.json profile which is used as the default seccomp profile # for the runtime. # @@ -249,9 +245,6 @@ default_sysctls = [ # #volumes = [] -# The network table contains settings pertaining to the management of -# CNI plugins. - [secrets] #driver = "file" @@ -260,6 +253,16 @@ default_sysctls = [ [network] +# Network backend determines what network driver will be used to set up and tear down container networks. +# Valid values are "cni" and "netavark". +# The default value is empty which means that it will automatically choose CNI or netavark. If there are +# already containers/images or CNI networks preset it will choose CNI. +# +# Before changing this value all containers must be stopped otherwise it is likely that +# iptables rules and network interfaces might leak on the host. A reboot will fix this. +# +#network_backend = "" + # Path to directory where CNI plugin binaries are located. 
# #cni_plugin_dirs = [ @@ -270,18 +273,22 @@ default_sysctls = [ # "/opt/cni/bin", #] -# The network name of the default CNI network to attach pods to. +# The network name of the default network to attach pods to. # #default_network = "podman" -# The default subnet for the default CNI network given in default_network. +# The default subnet for the default network given in default_network. # If a network with that name does not exist, a new network using that name and # this subnet will be created. # Must be a valid IPv4 CIDR prefix. # #default_subnet = "10.88.0.0/16" -# Path to the directory where CNI configuration files are located. +# Path to the directory where network configuration files are located. +# For the CNI backend the default is "/etc/cni/net.d" as root +# and "$HOME/.config/cni/net.d" as rootless. +# For the netavark backend "/etc/containers/networks" is used as root +# and "$graphroot/networks" as rootless. # #network_config_dir = "/etc/cni/net.d/" @@ -290,6 +297,12 @@ default_sysctls = [ # #active_service = production +# The compression format to use when pushing an image. +# Valid options are: `gzip`, `zstd` and `zstd:chunked`. +# +#compression_format = "gzip" + + # Cgroup management implementation used for the runtime. # Valid options "systemd" or "cgroupfs" # @@ -313,6 +326,11 @@ default_sysctls = [ # "/usr/local/sbin/conmon" #] +# Enforces using docker.io for completing short names in Podman's compatibility +# REST API. Note that this will ignore unqualified-search-registries and +# short-name aliases defined in containers-registries.conf(5). +#compat_api_enforce_docker_hub = true + # Specify the keys sequence used to detach a container. # Format is a single character [a-Z] or a comma separated sequence of # `ctrl-`, where `` is one of: @@ -336,6 +354,9 @@ default_sysctls = [ # #env = [] +# Define where event logs will be stored, when events_logger is "file". +#events_logfile_path="" + # Selects which logging mechanism to use for container engine events. # Valid values are `journald`, `file` and `none`. # @@ -378,14 +399,16 @@ default_sysctls = [ # Infra (pause) container image name for pod infra containers. When running a # pod, we start a `pause` process in a container to hold open the namespaces # associated with the pod. This container does nothing other then sleep, -# reserving the pods resources for the lifetime of the pod. +# reserving the pods resources for the lifetime of the pod. By default container +# engines run a builtin container using the pause executable. If you want override +# specify an image to pull. # -#infra_image = "k8s.gcr.io/pause:3.4.1" +#infra_image = "" # Specify the locking mechanism to use; valid values are "shm" and "file". # Change the default only if you are sure of what you are doing, in general # "file" is useful only on platforms where cgo is not available for using the -# faster "shm" lock type. You may need to run "podman system renumber" after +# faster "shm" lock type. You may need to run "podman system renumber" after # you change the lock type. # #lock_type** = "shm" @@ -444,7 +467,7 @@ default_sysctls = [ # #runtime = "crun" -# List of the OCI runtimes that support --format=json. When json is supported +# List of the OCI runtimes that support --format=json. When json is supported # engine will use it for reporting nicer errors. 
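The setupEnv helper added to config.go consumes the engine "env" list documented above: it accepts only KEY=value pairs and deliberately skips names already present in the environment, so host values win over containers.conf. A self-contained sketch of that parsing rule follows; the function name and sample values are illustrative.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// applyEngineEnv sets each KEY=value entry unless the variable is already
// defined, mirroring the precedence used by setupEnv in the vendored code.
func applyEngineEnv(entries []string) error {
	for _, entry := range entries {
		kv := strings.SplitN(entry, "=", 2)
		if len(kv) != 2 {
			fmt.Printf("skipping invalid entry %q, expected KEY=value\n", entry)
			continue
		}
		if _, ok := os.LookupEnv(kv[0]); ok {
			continue // host environment takes precedence
		}
		if err := os.Setenv(kv[0], kv[1]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = applyEngineEnv([]string{"https_proxy=http://proxy.example:3128", "badentry"})
	fmt.Println(os.Getenv("https_proxy"))
}
```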
# #runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"] @@ -457,8 +480,8 @@ default_sysctls = [ # #runtime_supports_nocgroups = ["crun", "krun"] -# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment -# variable. If you specify "storage", then the location of the +# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment +# variable. If you specify "storage", then the location of the # container/storage tmp directory will be used. # image_copy_tmp_dir="/var/tmp" @@ -572,6 +595,11 @@ default_sysctls = [ # #memory=2048 +# The username to use and create on the podman machine OS for rootless +# container access. +# +#user = "core" + # The [machine] table MUST be the last entry in this file. # (Unless another table is added) # TOML does not provide a way to end a table other than a further table being diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index e72e1b3e447..2791197497e 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -11,6 +11,7 @@ import ( "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" + "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" @@ -45,7 +46,7 @@ var ( // DefaultInitPath is the default path to the container-init binary DefaultInitPath = "/usr/libexec/podman/catatonit" // DefaultInfraImage to use for infra container - DefaultInfraImage = "k8s.gcr.io/pause:3.5" + DefaultInfraImage = "" // DefaultRootlessSHMLockPath is the default path for rootless SHM locks DefaultRootlessSHMLockPath = "/libpod_rootless_lock" // DefaultDetachKeys is the default keys sequence for detaching a @@ -93,10 +94,6 @@ const ( // InstallPrefix is the prefix where podman will be installed. // It can be overridden at build time. _installPrefix = "/usr" - // _cniConfigDir is the directory where cni configuration is found - _cniConfigDir = "/etc/cni/net.d/" - // _cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins - _cniConfigDirRootless = "cni/net.d/" // CgroupfsCgroupsManager represents cgroupfs native cgroup manager CgroupfsCgroupsManager = "cgroupfs" // DefaultApparmorProfile specifies the default apparmor profile for the container. 
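Several of the options documented above (network_backend, compression_format, the machine user) correspond to new fields on the Go config structs earlier in this diff. Below is a rough sketch of reading them through NewConfig, assuming the containers/common version vendored here; passing an empty path is assumed to fall back to the built-in defaults plus any containers.conf found in the standard system and user locations.

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Empty path: load defaults plus containers.conf from standard locations.
	cfg, err := config.NewConfig("")
	if err != nil {
		fmt.Println("loading containers.conf:", err)
		return
	}

	fmt.Println("network backend:   ", cfg.Network.NetworkBackend)
	fmt.Println("compression format:", cfg.Engine.CompressionFormat)
	fmt.Println("machine user:      ", cfg.Machine.User)
}
```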
@@ -140,8 +137,6 @@ func DefaultConfig() (*Config, error) { return nil, err } - cniConfig := _cniConfigDir - defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath if unshare.IsRootless() { configHome, err := homedir.GetConfigHome() @@ -155,7 +150,6 @@ func DefaultConfig() (*Config, error) { defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath } } - cniConfig = filepath.Join(configHome, _cniConfigDirRootless) } cgroupNS := "host" @@ -183,28 +177,27 @@ func DefaultConfig() (*Config, error) { "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", }, - EnvHost: false, - HTTPProxy: true, - Init: false, - InitPath: "", - IPCNS: "private", - LogDriver: defaultLogDriver(), - LogSizeMax: DefaultLogSizeMax, - NoHosts: false, - PidsLimit: DefaultPidsLimit, - PidNS: "private", - RootlessNetworking: getDefaultRootlessNetwork(), - ShmSize: DefaultShmSize, - TZ: "", - Umask: "0022", - UTSNS: "private", - UserNSSize: DefaultUserNSSize, + EnvHost: false, + HTTPProxy: true, + Init: false, + InitPath: "", + IPCNS: "private", + LogDriver: defaultLogDriver(), + LogSizeMax: DefaultLogSizeMax, + NetNS: "private", + NoHosts: false, + PidsLimit: DefaultPidsLimit, + PidNS: "private", + ShmSize: DefaultShmSize, + TZ: "", + Umask: "0022", + UTSNS: "private", + UserNSSize: DefaultUserNSSize, }, Network: NetworkConfig{ - DefaultNetwork: "podman", - DefaultSubnet: DefaultSubnet, - NetworkConfigDir: cniConfig, - CNIPluginDirs: DefaultCNIPluginDirs, + DefaultNetwork: "podman", + DefaultSubnet: DefaultSubnet, + CNIPluginDirs: DefaultCNIPluginDirs, }, Engine: *defaultEngineConfig, Secrets: defaultSecretConfig(), @@ -224,9 +217,10 @@ func defaultSecretConfig() SecretConfig { func defaultMachineConfig() MachineConfig { return MachineConfig{ CPUs: 1, - DiskSize: 10, - Image: "testing", + DiskSize: 100, + Image: getDefaultMachineImage(), Memory: 2048, + User: getDefaultMachineUser(), } } @@ -242,6 +236,8 @@ func defaultConfigFromMemory() (*EngineConfig, error) { c.EventsLogFilePath = filepath.Join(c.TmpDir, "events", "events.log") + c.CompatAPIEnforceDockerHub = true + if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { types.SetDefaultConfigFilePath(path) } @@ -366,7 +362,7 @@ func defaultTmpDir() (string, error) { return "/run/libpod", nil } - runtimeDir, err := getRuntimeDir() + runtimeDir, err := util.GetRuntimeDir() if err != nil { return "", err } @@ -569,9 +565,3 @@ func (c *Config) LogDriver() string { func (c *Config) MachineEnabled() bool { return c.Engine.MachineEnabled } - -// RootlessNetworking returns the "kind" of networking -// rootless containers should use -func (c *Config) RootlessNetworking() string { - return c.Containers.RootlessNetworking -} diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index c68c0b130fc..cc2d0fe3eb9 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -13,10 +13,15 @@ const ( oldMaxSize = uint64(1048576) ) -// getDefaultRootlessNetwork returns the default rootless network configuration. -// It is "slirp4netns" for Linux. 
-func getDefaultRootlessNetwork() string { - return "slirp4netns" +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" } // getDefaultProcessLimits returns the nproc for the current process in ulimits format diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index e38fb810de8..1aa7f6ef3d9 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -1,11 +1,16 @@ -// +build !linux +// +build !linux,!windows package config -// getDefaultRootlessNetwork returns the default rootless network configuration. -// It is "cni" for non-Linux OSes (to better support `podman-machine` usecases). -func getDefaultRootlessNetwork() string { - return "cni" +// getDefaultMachineImage returns the default machine image stream +// On Linux/Mac, this returns the FCOS stream +func getDefaultMachineImage() string { + return "testing" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "core" } // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go new file mode 100644 index 00000000000..28f102f1c50 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -0,0 +1,22 @@ +package config + +// getDefaultImage returns the default machine image stream +// On Windows this refers to the Fedora major release number +func getDefaultMachineImage() string { + return "35" +} + +// getDefaultMachineUser returns the user to use for rootless podman +func getDefaultMachineUser() string { + return "user" +} + +// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. 
+func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { + return false, nil +} + +// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format +func getDefaultProcessLimits() []string { + return []string{} +} diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go index 2a3b6fb35f1..f64b2dfc618 100644 --- a/vendor/github.com/containers/common/pkg/config/nosystemd.go +++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go @@ -22,3 +22,7 @@ func defaultLogDriver() string { func useSystemd() bool { return false } + +func useJournald() bool { + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index fab3ea437a8..186e8b343c9 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -4,12 +4,12 @@ package config import ( "io/ioutil" + "path/filepath" "strings" "sync" "github.com/containers/common/pkg/cgroupv2" "github.com/containers/storage/pkg/unshare" - "github.com/coreos/go-systemd/v22/sdjournal" ) var ( @@ -67,12 +67,20 @@ func useJournald() bool { if !useSystemd() { return } - journal, err := sdjournal.NewJournal() - if err != nil { - return + for _, root := range []string{"/run/log/journal", "/var/log/journal"} { + dirs, err := ioutil.ReadDir(root) + if err != nil { + continue + } + for _, d := range dirs { + if d.IsDir() { + if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil { + usesJournald = true + return + } + } + } } - journal.Close() - usesJournald = true return }) return usesJournald diff --git a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go b/vendor/github.com/containers/common/pkg/defaultnet/default_network.go deleted file mode 100644 index 9b32241d65c..00000000000 --- a/vendor/github.com/containers/common/pkg/defaultnet/default_network.go +++ /dev/null @@ -1,222 +0,0 @@ -package defaultnet - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "regexp" - "text/template" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// TODO: A smarter implementation would make sure cni-podman0 was unused before -// making the default, and adjust if necessary -const networkTemplate = `{ - "cniVersion": "0.4.0", - "name": "{{{{.Name}}}}", - "plugins": [ - { - "type": "bridge", - "bridge": "cni-podman0", - "isGateway": true, - "ipMasq": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "routes": [{ "dst": "0.0.0.0/0" }], - "ranges": [ - [ - { - "subnet": "{{{{.Subnet}}}}", - "gateway": "{{{{.Gateway}}}}" - } - ] - ] - } - }, -{{{{- if (eq .Machine true) }}}} - { - "type": "podman-machine", - "capabilities": { - "portMappings": true - } - }, -{{{{- end}}}} - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - }, - { - "type": "firewall" - }, - { - "type": "tuning" - } - ] -} -` - -var ( - // Borrowed from Podman, modified to remove dashes and periods. - nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_]*$") -) - -// Used to pass info into the template engine -type networkInfo struct { - Name string - Subnet string - Gateway string - Machine bool -} - -// The most trivial definition of a CNI network possible for our use here. -// We need the name, and nothing else. 
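The useJournald rewrite above drops the cgo-backed sdjournal probe and instead treats journald as available when a readable per-machine directory exists under /run/log/journal or /var/log/journal. A standalone sketch of that detection, assuming a Linux host and the same paths as the vendored code:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// journaldAvailable reports whether a readable per-machine journal directory
// exists under one of the usual journald roots.
func journaldAvailable() bool {
	for _, root := range []string{"/run/log/journal", "/var/log/journal"} {
		dirs, err := ioutil.ReadDir(root)
		if err != nil {
			continue
		}
		for _, d := range dirs {
			if !d.IsDir() {
				continue
			}
			if _, err := ioutil.ReadDir(filepath.Join(root, d.Name())); err == nil {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println("journald available:", journaldAvailable())
}
```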
-type network struct { - Name string `json:"name"` -} - -// Create makes the CNI default network, if necessary. -// Accepts the name and subnet of the network to create (a standard template -// will be used, with these values plugged in), the configuration directory -// where CNI configs are stored (to verify if a named configuration already -// exists), an exists directory (where a sentinel file will be stored, to ensure -// the network is only made once), and an isMachine bool (to determine whether -// the machine block will be added to the config). -// Create first checks if a default network has already been created via the -// presence of a sentinel file. If it does exist, it returns immediately without -// error. -// It next checks if a CNI network with the given name already exists. In that -// case, it creates the sentinel file and returns without error. -// If neither of these are true, the default network is created. -func Create(name, subnet, configDir, existsDir string, isMachine bool) error { - // TODO: Should probably regex name to make sure it's valid. - if name == "" || subnet == "" || configDir == "" || existsDir == "" { - return errors.Errorf("must provide values for all arguments to MakeDefaultNetwork") - } - if !nameRegex.MatchString(name) { - return errors.Errorf("invalid default network name %s - letters, numbers, and underscores only", name) - } - - sentinelFile := filepath.Join(existsDir, "defaultCNINetExists") - - // Check if sentinel file exists, return immediately if it does. - if _, err := os.Stat(sentinelFile); err == nil { - return nil - } - - // Create the sentinel file if it doesn't exist, so subsequent checks - // don't need to go further. - file, err := os.Create(sentinelFile) - if err != nil { - return err - } - file.Close() - - // We may need to make the config dir. - if err := os.MkdirAll(configDir, 0755); err != nil && !os.IsExist(err) { - return errors.Wrapf(err, "error creating CNI configuration directory") - } - - // Check all networks in the CNI conflist. - files, err := ioutil.ReadDir(configDir) - if err != nil { - return errors.Wrapf(err, "error reading CNI configuration directory") - } - if len(files) > 0 { - configPaths := make([]string, 0, len(files)) - for _, path := range files { - if !path.IsDir() && filepath.Ext(path.Name()) == ".conflist" { - configPaths = append(configPaths, filepath.Join(configDir, path.Name())) - } - } - for _, config := range configPaths { - configName, err := getConfigName(config) - if err != nil { - logrus.Errorf("Error reading CNI configuration file: %v", err) - continue - } - if configName == name { - return nil - } - } - } - - // We need to make the config. - // Get subnet and gateway. 
- _, ipNet, err := net.ParseCIDR(subnet) - if err != nil { - return errors.Wrapf(err, "default network subnet %s is invalid", subnet) - } - - ones, bits := ipNet.Mask.Size() - if ones == bits { - return errors.Wrapf(err, "default network subnet %s is to small", subnet) - } - gateway := make(net.IP, len(ipNet.IP)) - // copy the subnet ip to the gateway so we can modify it - copy(gateway, ipNet.IP) - // the default gateway should be the first ip in the subnet - gateway[len(gateway)-1]++ - - netInfo := new(networkInfo) - netInfo.Name = name - netInfo.Gateway = gateway.String() - netInfo.Subnet = ipNet.String() - netInfo.Machine = isMachine - - templ, err := template.New("network_template").Delims("{{{{", "}}}}").Parse(networkTemplate) - if err != nil { - return errors.Wrapf(err, "error compiling template for default network") - } - var output bytes.Buffer - if err := templ.Execute(&output, netInfo); err != nil { - return errors.Wrapf(err, "error executing template for default network") - } - - // Next, we need to place the config on disk. - // Loop through possible indexes, with a limit of 100 attempts. - created := false - for i := 87; i < 187; i++ { - configFile, err := os.OpenFile(filepath.Join(configDir, fmt.Sprintf("%d-%s.conflist", i, name)), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644) - if err != nil { - logrus.Infof("Attempt to create default CNI network config file failed: %v", err) - continue - } - defer configFile.Close() - - created = true - - // Success - file is open. Write our buffer to it. - if _, err := configFile.Write(output.Bytes()); err != nil { - return errors.Wrapf(err, "error writing default CNI config to file") - } - break - } - if !created { - return errors.Errorf("no available default network configuration file was found") - } - - return nil -} - -// Get the name of the configuration contained in a given conflist file. Accepts -// the full path of a .conflist CNI configuration. -func getConfigName(file string) (string, error) { - contents, err := ioutil.ReadFile(file) - if err != nil { - return "", err - } - config := new(network) - if err := json.Unmarshal(contents, config); err != nil { - return "", errors.Wrapf(err, "error decoding CNI configuration %s", filepath.Base(file)) - } - return config.Name, nil -} diff --git a/vendor/github.com/containers/common/pkg/download/download.go b/vendor/github.com/containers/common/pkg/download/download.go new file mode 100644 index 00000000000..abf4c87739e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/download/download.go @@ -0,0 +1,31 @@ +package download + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// FromURL downloads the specified source to a file in tmpdir (OS defaults if +// empty). 
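The deleted defaultnet code derived the bridge gateway by parsing the subnet CIDR and taking the first address in it, incrementing the last byte of the network address. The compact sketch below only illustrates that removed logic; it assumes an IPv4 subnet such as 10.88.0.0/16.

```go
package main

import (
	"fmt"
	"net"
)

// firstHostIP returns the conventional gateway for a subnet: the network
// address with its last byte incremented (10.88.0.0/16 -> 10.88.0.1).
func firstHostIP(subnet string) (net.IP, error) {
	_, ipNet, err := net.ParseCIDR(subnet)
	if err != nil {
		return nil, err
	}
	ones, bits := ipNet.Mask.Size()
	if ones == bits {
		return nil, fmt.Errorf("subnet %s has no usable host addresses", subnet)
	}
	gw := make(net.IP, len(ipNet.IP))
	copy(gw, ipNet.IP)
	gw[len(gw)-1]++
	return gw, nil
}

func main() {
	gw, err := firstHostIP("10.88.0.0/16")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("gateway:", gw) // gateway: 10.88.0.1
}
```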
+func FromURL(tmpdir, source string) (string, error) { + tmp, err := ioutil.TempFile(tmpdir, "") + if err != nil { + return "", fmt.Errorf("creating temporary download file: %w", err) + } + defer tmp.Close() + + response, err := http.Get(source) // nolint:noctx + if err != nil { + return "", fmt.Errorf("downloading %s: %w", source, err) + } + defer response.Body.Close() + + _, err = io.Copy(tmp, response.Body) + if err != nil { + return "", fmt.Errorf("copying %s to %s: %w", source, tmp.Name(), err) + } + + return tmp.Name(), nil +} diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index ea9495ee73a..5c2836893a5 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -74,6 +74,7 @@ func Create() List { }, oci: v1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, }, } } @@ -373,6 +374,7 @@ func FromBlob(manifestBytes []byte) (List, error) { }, oci: v1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: v1.MediaTypeImageIndex, }, } switch manifestType { diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 02e670c50c8..5d826e80514 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -14,9 +14,27 @@ import ( // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int finalOpts := make([]string, 0, len(options)) for _, opt := range options { + // support advanced options like upperdir=/path, workdir=/path + if strings.Contains(opt, "upperdir") { + foundUpperDir++ + if foundUpperDir > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", ")) + } + finalOpts = append(finalOpts, opt) + continue + } + if strings.Contains(opt, "workdir") { + foundWorkDir++ + if foundWorkDir > 1 { + return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", ")) + } + finalOpts = append(finalOpts, opt) + continue + } + switch opt { case "noexec", "exec": foundExec++ @@ -66,6 +84,7 @@ func ValidateVolumeOpts(options []string) ([]string, error) { // are intended to be always safe to use, even not on OS // X). 
continue + case "idmap": default: return nil, errors.Errorf("invalid option type %q", opt) } diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index 43e3a668882..a9573e4e8a8 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -45,8 +45,12 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions func isRetryable(err error) bool { err = errors.Cause(err) - if err == context.Canceled || err == context.DeadlineExceeded { + switch err { + case nil: return false + case context.Canceled, context.DeadlineExceeded: + return false + default: // continue } type unwrapper interface { @@ -57,7 +61,8 @@ func isRetryable(err error) bool { case errcode.Error: switch e.Code { - case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied, + errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown: return false } return true @@ -86,7 +91,7 @@ func isRetryable(err error) bool { } } return true - case unwrapper: + case unwrapper: // Test this last, because various error types might implement .Unwrap() err = e.Unwrap() return isRetryable(err) } diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index cf333744c53..d196384f033 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -80,6 +80,7 @@ func DefaultProfile() *Seccomp { "vmsplice", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, }, @@ -574,6 +575,7 @@ func DefaultProfile() *Seccomp { "open_by_handle_at", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -609,6 +611,7 @@ func DefaultProfile() *Seccomp { "setns", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -630,6 +633,7 @@ func DefaultProfile() *Seccomp { "chroot", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -657,6 +661,7 @@ func DefaultProfile() *Seccomp { "query_module", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -678,6 +683,7 @@ func DefaultProfile() *Seccomp { "acct", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -707,6 +713,7 @@ func DefaultProfile() *Seccomp { "ptrace", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -730,6 +737,7 @@ func DefaultProfile() *Seccomp { "ioperm", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -757,6 +765,7 @@ func DefaultProfile() *Seccomp { "clock_settime64", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -778,6 +787,7 @@ func DefaultProfile() *Seccomp { "vhangup", }, Action: ActErrno, + Errno: "EPERM", ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ @@ -789,6 +799,7 @@ func DefaultProfile() *Seccomp { "socket", }, Action: ActErrno, + Errno: "EINVAL", ErrnoRet: &einval, Args: []*Arg{ { @@ -867,6 +878,7 @@ func DefaultProfile() *Seccomp { return &Seccomp{ DefaultAction: ActErrno, + DefaultErrno: "ENOSYS", DefaultErrnoRet: &enosys, ArchMap: arches(), Syscalls: syscalls, diff --git 
a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go new file mode 100644 index 00000000000..a1009012df3 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -0,0 +1,93 @@ +// +build linux,seccomp + +package seccomp + +import ( + "golang.org/x/sys/unix" +) + +// Error table +var errnoArch = map[string]uint{ + "EPERM": uint(unix.EPERM), + "ENOENT": uint(unix.ENOENT), + "ESRCH": uint(unix.ESRCH), + "EIO": uint(unix.EIO), + "ENXIO": uint(unix.ENXIO), + "E2BIG": uint(unix.E2BIG), + "ENOEXEC": uint(unix.ENOEXEC), + "EBADF": uint(unix.EBADF), + "ECHILD": uint(unix.ECHILD), + "EDEADLK": uint(unix.EDEADLK), + "ENOMEM": uint(unix.ENOMEM), + "EACCES": uint(unix.EACCES), + "EFAULT": uint(unix.EFAULT), + "ENOTBLK": uint(unix.ENOTBLK), + "EBUSY": uint(unix.EBUSY), + "EEXIST": uint(unix.EEXIST), + "EXDEV": uint(unix.EXDEV), + "ENODEV": uint(unix.ENODEV), + "ENOTDIR": uint(unix.ENOTDIR), + "EISDIR": uint(unix.EISDIR), + "EINVAL": uint(unix.EINVAL), + "ENFILE": uint(unix.ENFILE), + "EMFILE": uint(unix.EMFILE), + "ENOTTY": uint(unix.ENOTTY), + "ETXTBSY": uint(unix.ETXTBSY), + "EFBIG": uint(unix.EFBIG), + "ENOSPC": uint(unix.ENOSPC), + "ESPIPE": uint(unix.ESPIPE), + "EROFS": uint(unix.EROFS), + "EMLINK": uint(unix.EMLINK), + "EPIPE": uint(unix.EPIPE), + "EDOM": uint(unix.EDOM), + "ERANGE": uint(unix.ERANGE), + "EAGAIN": uint(unix.EAGAIN), + "EINPROGRESS": uint(unix.EINPROGRESS), + "EALREADY": uint(unix.EALREADY), + "ENOTSOCK": uint(unix.ENOTSOCK), + "EDESTADDRREQ": uint(unix.EDESTADDRREQ), + "EMSGSIZE": uint(unix.EMSGSIZE), + "EPROTOTYPE": uint(unix.EPROTOTYPE), + "ENOPROTOOPT": uint(unix.ENOPROTOOPT), + "EPROTONOSUPPORT": uint(unix.EPROTONOSUPPORT), + "ESOCKTNOSUPPORT": uint(unix.ESOCKTNOSUPPORT), + "EOPNOTSUPP": uint(unix.EOPNOTSUPP), + "EPFNOSUPPORT": uint(unix.EPFNOSUPPORT), + "EAFNOSUPPORT": uint(unix.EAFNOSUPPORT), + "EADDRINUSE": uint(unix.EADDRINUSE), + "EADDRNOTAVAIL": uint(unix.EADDRNOTAVAIL), + "ENETDOWN": uint(unix.ENETDOWN), + "ENETUNREACH": uint(unix.ENETUNREACH), + "ENETRESET": uint(unix.ENETRESET), + "ECONNABORTED": uint(unix.ECONNABORTED), + "ECONNRESET": uint(unix.ECONNRESET), + "ENOBUFS": uint(unix.ENOBUFS), + "EISCONN": uint(unix.EISCONN), + "ENOTCONN": uint(unix.ENOTCONN), + "ESHUTDOWN": uint(unix.ESHUTDOWN), + "ETOOMANYREFS": uint(unix.ETOOMANYREFS), + "ETIMEDOUT": uint(unix.ETIMEDOUT), + "ECONNREFUSED": uint(unix.ECONNREFUSED), + "ELOOP": uint(unix.ELOOP), + "ENAMETOOLONG": uint(unix.ENAMETOOLONG), + "EHOSTDOWN": uint(unix.EHOSTDOWN), + "EHOSTUNREACH": uint(unix.EHOSTUNREACH), + "ENOTEMPTY": uint(unix.ENOTEMPTY), + "EUSERS": uint(unix.EUSERS), + "EDQUOT": uint(unix.EDQUOT), + "ESTALE": uint(unix.ESTALE), + "EREMOTE": uint(unix.EREMOTE), + "ENOLCK": uint(unix.ENOLCK), + "ENOSYS": uint(unix.ENOSYS), + "EILSEQ": uint(unix.EILSEQ), + "ENOMEDIUM": uint(unix.ENOMEDIUM), + "EMEDIUMTYPE": uint(unix.EMEDIUMTYPE), + "EOVERFLOW": uint(unix.EOVERFLOW), + "ECANCELED": uint(unix.ECANCELED), + "EIDRM": uint(unix.EIDRM), + "ENOMSG": uint(unix.ENOMSG), + "ENOTSUP": uint(unix.ENOTSUP), + "EBADMSG": uint(unix.EBADMSG), + "ENOTRECOVERABLE": uint(unix.ENOTRECOVERABLE), + "EOWNERDEAD": uint(unix.EOWNERDEAD), +} diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index c009134e3cc..9314eb3cc5e 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ 
b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -1,6 +1,7 @@ { "defaultAction": "SCMP_ACT_ERRNO", "defaultErrnoRet": 38, + "defaultErrno": "ENOSYS", "archMap": [ { "architecture": "SCMP_ARCH_X86_64", @@ -87,7 +88,8 @@ "comment": "", "includes": {}, "excludes": {}, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -650,7 +652,8 @@ "CAP_DAC_READ_SEARCH" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -693,7 +696,8 @@ "CAP_SYS_ADMIN" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -722,7 +726,8 @@ "CAP_SYS_CHROOT" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -757,7 +762,8 @@ "CAP_SYS_MODULE" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -786,7 +792,8 @@ "CAP_SYS_PACCT" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -823,7 +830,8 @@ "CAP_SYS_PTRACE" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -854,7 +862,8 @@ "CAP_SYS_RAWIO" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -889,7 +898,8 @@ "CAP_SYS_TIME" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -918,7 +928,8 @@ "CAP_SYS_TTY_CONFIG" ] }, - "errnoRet": 1 + "errnoRet": 1, + "errno": "EPERM" }, { "names": [ @@ -946,7 +957,8 @@ "CAP_AUDIT_WRITE" ] }, - "errnoRet": 22 + "errnoRet": 22, + "errno": "EINVAL" }, { "names": [ diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go index af36b9990b5..0c022ac7ab9 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "strconv" "github.com/opencontainers/runtime-spec/specs-go" libseccomp "github.com/seccomp/libseccomp-golang" @@ -66,6 +67,37 @@ func inSlice(slice []string, s string) bool { return false } +func getArchitectures(config *Seccomp, newConfig *specs.LinuxSeccomp) error { + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + return nil +} + +func getErrno(errno string, def *uint) (*uint, error) { + if errno == "" { + return def, nil + } + v, err := strconv.ParseUint(errno, 10, 32) + if err == nil { + v2 := uint(v) + return &v2, nil + } + + v2, found := errnoArch[errno] + if !found { + return nil, fmt.Errorf("unknown errno %s", errno) + } + return &v2, nil +} + func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { if config == nil { return nil, nil @@ -84,15 +116,8 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) arch = native.String() } - if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { - return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") - } - - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) != 0 { - for _, a := range config.Architectures { - 
newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) - } + if err := getArchitectures(config, newConfig); err != nil { + return nil, err } if len(config.ArchMap) != 0 { @@ -111,7 +136,11 @@ func setupSeccomp(config *Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) } newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) - newConfig.DefaultErrnoRet = config.DefaultErrnoRet + + newConfig.DefaultErrnoRet, err = getErrno(config.DefaultErrno, config.DefaultErrnoRet) + if err != nil { + return nil, err + } Loop: // Loop through all syscall blocks and convert them to libcontainer format after filtering them @@ -145,12 +174,17 @@ Loop: return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") } + errno, err := getErrno(call.Errno, call.ErrnoRet) + if err != nil { + return nil, err + } + if call.Name != "" { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, call.ErrnoRet)) + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall([]string{call.Name}, call.Action, call.Args, errno)) } if len(call.Names) > 0 { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, call.ErrnoRet)) + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Names, call.Action, call.Args, errno)) } } diff --git a/vendor/github.com/containers/common/pkg/seccomp/types.go b/vendor/github.com/containers/common/pkg/seccomp/types.go index 07751f72907..a8a9e9d4f7c 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/types.go +++ b/vendor/github.com/containers/common/pkg/seccomp/types.go @@ -6,8 +6,12 @@ package seccomp // Seccomp represents the config for a seccomp profile for syscall restriction. type Seccomp struct { - DefaultAction Action `json:"defaultAction"` + DefaultAction Action `json:"defaultAction"` + + // DefaultErrnoRet is obsolete, please use DefaultErrno DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"` + DefaultErrno string `json:"defaultErrno,omitempty"` + // Architectures is kept to maintain backward compatibility with the old // seccomp profile. Architectures []Arch `json:"architectures,omitempty"` @@ -107,5 +111,7 @@ type Syscall struct { Comment string `json:"comment"` Includes Filter `json:"includes"` Excludes Filter `json:"excludes"` - ErrnoRet *uint `json:"errnoRet,omitempty"` + // ErrnoRet is obsolete, please use Errno + ErrnoRet *uint `json:"errnoRet,omitempty"` + Errno string `json:"errno,omitempty"` } diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go index 22aacb1ce9e..846bd5c177d 100644 --- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go @@ -36,7 +36,7 @@ type driverConfig struct { LookupCommand string `mapstructure:"lookup"` // StoreCommand contains a shell command that stores a secret. 
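The getErrno helper introduced above resolves the new string-valued errno fields: an empty string keeps the legacy *uint value, a decimal string is parsed directly, and anything else must be a symbolic name present in the errnoArch table earlier in this diff. A reduced sketch of that resolution order, assuming Linux for the unix constants and using only a few entries from the table:

```go
package main

import (
	"fmt"
	"strconv"

	"golang.org/x/sys/unix"
)

// errnoByName is a trimmed-down stand-in for the errnoArch table.
var errnoByName = map[string]uint{
	"EPERM":  uint(unix.EPERM),
	"EINVAL": uint(unix.EINVAL),
	"ENOSYS": uint(unix.ENOSYS),
}

// resolveErrno mimics getErrno: an empty string keeps the legacy value,
// a decimal string is parsed directly, anything else must be a known name.
func resolveErrno(errno string, legacy *uint) (*uint, error) {
	if errno == "" {
		return legacy, nil
	}
	if v, err := strconv.ParseUint(errno, 10, 32); err == nil {
		u := uint(v)
		return &u, nil
	}
	if u, ok := errnoByName[errno]; ok {
		return &u, nil
	}
	return nil, fmt.Errorf("unknown errno %s", errno)
}

func main() {
	v, _ := resolveErrno("EPERM", nil)
	fmt.Println(*v) // 1 on Linux
}
```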
// The secret id is provided as environment variable SECRET_ID - // The secret value itself is provied over stdin + // The secret value itself is provided over stdin StoreCommand string `mapstructure:"store"` } diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index 6c9321e7357..3c0d2b237d2 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -149,14 +149,15 @@ func getMountsMap(path string) (string, string, error) { //nolint // MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem // mountLabel: MAC/SELinux label for container content -// containerWorkingDir: Private data for storing subscriptions on the host mounted in container. +// containerRunDir: Private data for storing subscriptions on the host mounted in container. // mountFile: Additional mount points required for the container. -// mountPoint: Container image mountpoint +// mountPoint: Container image mountpoint, or the directory from the hosts perspective that +// corresponds to `/` in the container. // uid: to assign to content created for subscriptions // gid: to assign to content created for subscriptions // rootless: indicates whether container is running in rootless mode // disableFips: indicates whether system should ignore fips mode -func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount { +func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount { var ( subscriptionMounts []rspec.Mount mountFiles []string @@ -174,7 +175,7 @@ func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint str } for _, file := range mountFiles { if _, err := os.Stat(file); err == nil { - mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerWorkingDir, uid, gid) + mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid) if err != nil { logrus.Warnf("Failed to mount subscriptions, skipping entry in %s: %v", file, err) } @@ -191,7 +192,7 @@ func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint str _, err := os.Stat("/etc/system-fips") switch { case err == nil: - if err := addFIPSModeSubscription(&subscriptionMounts, containerWorkingDir, mountPoint, mountLabel, uid, gid); err != nil { + if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { logrus.Errorf("Adding FIPS mode subscription to container: %v", err) } case os.IsNotExist(err): @@ -210,7 +211,7 @@ func rchown(chowndir string, uid, gid int) error { // addSubscriptionsFromMountsFile copies the contents of host directory to container directory // and returns a list of mounts -func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir string, uid, gid int) ([]rspec.Mount, error) { +func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) { var mounts []rspec.Mount defaultMountsPaths := getMounts(filePath) for _, path := range defaultMountsPaths { @@ -228,7 +229,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st return nil, err } - ctrDirOrFileOnHost := filepath.Join(containerWorkingDir, ctrDirOrFile) + 
ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile) // In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost _, err = os.Stat(ctrDirOrFileOnHost) @@ -300,13 +301,17 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st return mounts, nil } -// addFIPSModeSubscription creates /run/secrets/system-fips in the container -// root filesystem if /etc/system-fips exists on hosts. -// This enables the container to be FIPS compliant and run openssl in -// FIPS mode as the host is also in FIPS mode. -func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPoint, mountLabel string, uid, gid int) error { +// addFIPSModeSubscription adds mounts to the `mounts` slice that are needed for the container to run openssl in FIPS mode +// (i.e., be FIPS compliant). +// It should only be called if /etc/system-fips exists on host. +// It primarily does two things: +// - creates /run/secrets/system-fips in the container root filesystem, and adds it to the `mounts` slice. +// - If `/etc/crypto-policies/back-ends` already exists inside of the container, it creates +// `/usr/share/crypto-policies/back-ends/FIPS` inside the container as well. +// It is done from within the container to avoid policy incompatibility between the container and host. +func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error { subscriptionsDir := "/run/secrets" - ctrDirOnHost := filepath.Join(containerWorkingDir, subscriptionsDir) + ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir) if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) { if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint return err @@ -322,7 +327,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPo if err != nil { return errors.Wrap(err, "creating system-fips file in container for FIPS mode") } - defer file.Close() + file.Close() } if !mountExists(*mounts, subscriptionsDir) { diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go new file mode 100644 index 00000000000..98890a686f8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -0,0 +1,24 @@ +package util + +import "regexp" + +// StringInSlice determines if a string is in a string slice, returns bool +func StringInSlice(s string, sl []string) bool { + for _, i := range sl { + if i == s { + return true + } + } + return false +} + +// StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool +func StringMatchRegexSlice(s string, re []string) bool { + for _, r := range re { + m, err := regexp.MatchString(r, s) + if err == nil && m { + return true + } + } + return false +} diff --git a/vendor/github.com/containers/common/pkg/config/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go similarity index 95% rename from vendor/github.com/containers/common/pkg/config/util_supported.go rename to vendor/github.com/containers/common/pkg/util/util_supported.go index 33e4a9e8fc2..422e28742bc 100644 --- a/vendor/github.com/containers/common/pkg/config/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -1,6 +1,6 @@ // +build linux darwin -package config +package util import ( "fmt" @@ -19,8 +19,8 @@ var ( rootlessRuntimeDir string ) -// getRuntimeDir returns the
runtime directory -func getRuntimeDir() (string, error) { +// GetRuntimeDir returns the runtime directory +func GetRuntimeDir() (string, error) { var rootlessRuntimeDirError error rootlessRuntimeDirOnce.Do(func() { diff --git a/vendor/github.com/containers/common/pkg/config/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go similarity index 76% rename from vendor/github.com/containers/common/pkg/config/util_windows.go rename to vendor/github.com/containers/common/pkg/util/util_windows.go index 995301f5df0..2add712f19d 100644 --- a/vendor/github.com/containers/common/pkg/config/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -1,12 +1,12 @@ // +build windows -package config +package util import ( "github.com/pkg/errors" ) // getRuntimeDir returns the runtime directory -func getRuntimeDir() (string, error) { +func GetRuntimeDir() (string, error) { return "", errors.New("this function is not implemented for windows") } diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index b6ceabce531..2088e955295 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.46.1-dev" +const Version = "0.47.5" diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index e1649ba8e18..0501fb3c115 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -15,8 +15,10 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/imagedestination" + "github.com/containers/image/v5/internal/imagesource" "github.com/containers/image/v5/internal/pkg/platform" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" "github.com/containers/image/v5/pkg/compression" @@ -63,8 +65,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. type copier struct { - dest types.ImageDestination - rawSource types.ImageSource + dest private.ImageDestination + rawSource private.ImageSource reportWriter io.Writer progressOutput io.Writer progressInterval time.Duration @@ -80,13 +82,13 @@ type copier struct { // imageCopier tracks state specific to a single image (possibly an item of a manifest list) type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool - canSubstituteBlobs bool - ociEncryptLayers *[]int + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + ociEncryptLayers *[]int } const ( @@ -124,15 +126,20 @@ type ImageListSelection int type Options struct { RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. 
SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`. ReportWriter io.Writer SourceCtx *types.SystemContext DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + + // Preserve digests, and fail if we cannot. + PreserveDigests bool // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type ForceManifestMIMEType string ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. // The encryption options is derived from the construction of EncryptConfig object. // Note: During initial encryption process of a layer, the resultant digest is not known @@ -197,20 +204,22 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, reportWriter = options.ReportWriter } - dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) + publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx) if err != nil { return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef)) } + dest := imagedestination.FromPublic(publicDest) defer func() { if err := dest.Close(); err != nil { retErr = errors.Wrapf(retErr, " (dest: %v)", err) } }() - rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) + publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx) if err != nil { return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef)) } + rawSource := imagesource.FromPublic(publicRawSource) defer func() { if err := rawSource.Close(); err != nil { retErr = errors.Wrapf(retErr, " (src: %v)", err) @@ -410,7 +419,36 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference())) } } - canModifyManifestList := (len(sigs) == 0) + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, errors.Wrapf(err, "computing digest of source image's manifest") + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyOneImage.
+ cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } // Determine if we'll need to convert the manifest list to a different format. forceListMIMEType := options.ForceManifestMIMEType @@ -425,8 +463,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur return nil, errors.Wrapf(err, "determining manifest list type to write to destination") } if selectedListType != originalList.MIMEType() { - if !canModifyManifestList { - return nil, errors.Errorf("manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) } } @@ -510,8 +548,8 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // If we can't just use the original value, but we have to change it, flag an error. if !bytes.Equal(attemptedManifestList, originalManifestList) { - if !canModifyManifestList { - return nil, errors.Errorf(" manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType) + if cannotModifyManifestListReason != "" { + return nil, errors.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) } logrus.Debugf("Manifest list has been updated") } else { @@ -536,7 +574,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur // Sign the manifest list. if options.SignBy != "" { - newSig, err := c.createSignature(manifestList, options.SignBy) + newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase) if err != nil { return nil, err } @@ -629,13 +667,27 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } } + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. + cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + ic := imageCopier{ c: c, manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0 && !destIsDigestedReference, - ociEncryptLayers: options.OciEncryptLayers, + cannotModifyManifestReason: cannotModifyManifestReason, + ociEncryptLayers: options.OciEncryptLayers, } // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. 
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path: @@ -643,7 +695,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == "" + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == "" if err := ic.updateEmbeddedDockerReference(); err != nil { return nil, "", "", err @@ -710,10 +762,10 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return nil, "", "", errors.Wrap(err, "Writing manifest failed (and converting it is not possible, image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return nil, "", "", errors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason) } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. @@ -744,7 +796,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli } if options.SignBy != "" { - newSig, err := c.createSignature(manifestBytes, options.SignBy) + newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase) if err != nil { return nil, "", "", err } @@ -813,9 +865,9 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error { return nil // No reference embedded in the manifest, or it matches destRef already. } - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which is not possible (image is signed or the destination specifies a digest)", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", + transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) } ic.manifestUpdates.EmbeddedDockerReference = destRef return nil @@ -833,7 +885,7 @@ func isTTY(w io.Writer) bool { return false } -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". 
func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfos := ic.src.LayerInfos() numLayers := len(srcInfos) @@ -844,8 +896,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { srcInfosUpdated := false // If we only need to check authorization, no updates required. if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Copying this image requires changing layer representation, which is not possible (image is signed or the destination specifies a digest)") + if ic.cannotModifyManifestReason != "" { + return errors.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) } srcInfos = updatedSrcInfos srcInfosUpdated = true @@ -975,8 +1027,8 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool { func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { pendingImage := ic.src if !ic.noPendingManifestUpdates() { - if !ic.canModifyManifest { - return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + if ic.cannotModifyManifestReason != "" { + return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) } if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. @@ -1011,7 +1063,8 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc instanceDigest = &manifestDigest } if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - return nil, "", errors.Wrapf(err, "writing manifest %q", string(man)) + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", errors.Wrapf(err, "writing manifest") } return man, manifestDigest, nil } @@ -1025,20 +1078,15 @@ func (c *copier) newProgressPool() *mpb.Progress { return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) } -// customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar -func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorator { - producer := func(filler interface{}) decor.DecorFunc { - return func(s decor.Statistics) string { - if s.Total == 0 { - pairFmt := "%.1f / %.1f (skipped: %.1f)" - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) - } - pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" - percentage := 100.0 * float64(s.Refill) / float64(s.Total) - return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) - } +// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar +func customPartialBlobDecorFunc(s decor.Statistics) string { + if s.Total == 0 { + pairFmt := "%.1f / %.1f (skipped: %.1f)" + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill)) } - return decor.Any(producer(filler), wcc...) 
+ pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)" + percentage := 100.0 * float64(s.Refill) / float64(s.Total) + return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage) } // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter @@ -1063,7 +1111,6 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. // Use a normal progress bar when we know the size (i.e., size > 0). // Otherwise, use a spinner to indicate that something's happening. var bar *mpb.Bar - sstyle := mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft() if info.Size > 0 { if partial { bar = pool.AddBar(info.Size, @@ -1072,7 +1119,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. decor.OnComplete(decor.Name(prefix), onComplete), ), mpb.AppendDecorators( - customPartialBlobCounter(sstyle.Build()), + decor.Any(customPartialBlobDecorFunc), ), ) } else { @@ -1087,8 +1134,8 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. ) } } else { - bar = pool.Add(0, - sstyle.Build(), + bar = pool.New(0, + mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(), mpb.BarFillerClearOnComplete(), mpb.PrependDecorators( decor.OnComplete(decor.Name(prefix), onComplete), @@ -1182,28 +1229,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. // Fixing that will probably require passing more information to TryReusingBlob() than the current version of // the ImageDestination interface lets us pass in. - var ( - blobInfo types.BlobInfo - reused bool - err error - ) - // Note: the storage destination optimizes the committing of - // layers which requires passing the index of the layer. - // Hence, we need to special case and cast. - dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: ic.canSubstituteBlobs, - SrcRef: srcRef, - EmptyLayer: emptyLayer, - } - options.LayerIndex = &layerIndex - reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options) - } else { - reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs) - } - + reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ + Cache: ic.c.blobInfoCache, + CanSubstitute: ic.canSubstituteBlobs, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + }) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest) } @@ -1243,9 +1275,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // of the source file are not known yet and must be fetched. // Attempt a partial only when the source allows to retrieve a blob partially and // the destination has support for it. 
- imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable) - imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial) - if okSource && okDest && !diffIDIsNeeded { + if ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() && !diffIDIsNeeded { if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") hideProgressBar := true @@ -1253,29 +1283,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to bar.Abort(hideProgressBar) }() - progress := make(chan int64) - terminate := make(chan interface{}) - - defer close(terminate) - defer close(progress) - - proxy := imageSourceSeekableProxy{ - source: imgSource, - progress: progress, + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, } - go func() { - for { - select { - case written := <-progress: - bar.IncrInt64(written) - case <-terminate: - return - } - } - }() - bar.SetTotal(srcInfo.Size, false) - info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) + info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) if err == nil { bar.SetRefill(srcInfo.Size - bar.Current()) bar.SetCurrent(srcInfo.Size) @@ -1359,7 +1372,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } } - blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -1615,24 +1628,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } // === Finally, send the layer stream to dest. - var uploadedInfo types.BlobInfo - // Note: the storage destination optimizes the committing of layers - // which requires passing the index of the layer. Hence, we need to - // special case and cast. 
- dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions) - if ok { - options := internalTypes.PutBlobOptions{ - Cache: c.blobInfoCache, - IsConfig: isConfig, - EmptyLayer: emptyLayer, - } - if !isConfig { - options.LayerIndex = &layerIndex - } - uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) - } else { - uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig) + options := private.PutBlobOptions{ + Cache: c.blobInfoCache, + IsConfig: isConfig, + EmptyLayer: emptyLayer, + } + if !isConfig { + options.LayerIndex = &layerIndex } + uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "writing blob") } diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index b97edbf08b7..86ec8863aea 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -79,10 +79,10 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller + if ic.cannotModifyManifestReason != "" { + // We could also drop this check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” + // messages. But it is nice to hide the “if we can't modify, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? diff --git a/vendor/github.com/containers/image/v5/copy/progress_reader.go b/vendor/github.com/containers/image/v5/copy/progress_reader.go index 42f490d326a..de23cec1b71 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_reader.go +++ b/vendor/github.com/containers/image/v5/copy/progress_reader.go @@ -5,8 +5,9 @@ import ( "io" "time" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" + "github.com/vbauerster/mpb/v7" ) // progressReader is a reader that reports its progress on an interval. @@ -80,25 +81,26 @@ func (r *progressReader) Read(p []byte) (int, error) { return n, err } -// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes +// blobChunkAccessorProxy wraps a BlobChunkAccessor and keeps track of how many bytes // are received. -type imageSourceSeekableProxy struct { - // source is the seekable input to read from. - source internalTypes.ImageSourceSeekable - // progress is the chan where the total number of bytes read so far are reported. - progress chan int64 +type blobChunkAccessorProxy struct { + wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor + bar *mpb.Bar // A progress bar updated with the number of bytes read so far } -// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received -// to the progress chan. 
-func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks) +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) if err == nil { total := int64(0) for _, c := range chunks { total += int64(c.Length) } - s.progress <- total + s.bar.IncrInt64(total) } return rc, errs, err } diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go index 61612a4d3d7..21a3facd72a 100644 --- a/vendor/github.com/containers/image/v5/copy/sign.go +++ b/vendor/github.com/containers/image/v5/copy/sign.go @@ -7,7 +7,7 @@ import ( ) // createSignature creates a new signature of manifest using keyIdentity. -func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { +func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "initializing GPG") @@ -23,7 +23,7 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e } c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) + newSig, err := signature.SignDockerManifestWithOptions(manifest, dockerReference.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase}) if err != nil { return nil, errors.Wrap(err, "creating signature") } diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 3fe9a11d003..833323b424a 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -711,7 +711,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, return nil, err } defer res.Body.Close() - if err := httpResponseToError(res, "Requesting bear token"); err != nil { + if err := httpResponseToError(res, "Requesting bearer token"); err != nil { return nil, err } tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 56c39c141d7..e7af8f93d53 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -464,6 +464,18 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst } return err } + // A HTTP server may not be a registry at all, and just return 200 OK to everything + // (in particular that can fairly easily happen after tearing down a website and + // replacing it with a global 302 redirect to a new website, completely ignoring the + // path in the request); in that case we 
could “succeed” uploading a whole image. + // With docker/distribution we could rely on a Docker-Content-Digest header being present + // (because docker/distribution/registry/client has been failing uploads if it was missing), + // but that has been defined as explicitly optional by + // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers + // So, just note the missing header in a debug log. + if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 { + logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry") + } return nil } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 314e9b394ee..cb520d67056 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -16,7 +16,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" - internalTypes "github.com/containers/image/v5/internal/types" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" @@ -147,6 +147,11 @@ func (s *dockerImageSource) Close() error { return nil } +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (s *dockerImageSource) SupportsGetBlobAt() bool { + return true +} + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() // to read the image's layers. @@ -288,7 +293,7 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool { } // splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks -func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) { +func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { defer close(streams) defer close(errs) currentOffset := uint64(0) @@ -322,7 +327,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } // handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan. -func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) { +func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) { defer close(streams) defer close(errs) if !strings.HasPrefix(mediaType, "multipart/") { @@ -357,9 +362,12 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read } } -// GetBlobAt returns a stream for the specified blob. +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. // The specified chunks must be not overlapping and sorted by their offset. 
-func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) var rangeVals []string @@ -401,7 +409,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, return streams, errs, nil case http.StatusBadRequest: res.Body.Close() - return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status} + return nil, nil, private.BadPartialRequestError{Status: res.Status} default: err := httpResponseToError(res, "Error fetching partial blob") if err == nil { diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go new file mode 100644 index 00000000000..7b15871f7b6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go @@ -0,0 +1,6 @@ +package reference + +// Return true if the specified string fully matches `IdentifierRegexp`. +func IsFullIdentifier(s string) bool { + return anchoredIdentifierRegexp.MatchString(s) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go new file mode 100644 index 00000000000..82734a6cdc3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -0,0 +1,69 @@ +package imagedestination + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// FromPublic(dest) returns an object that provides the private.ImageDestination API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageDestination, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageDestination. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementors of a public interface. +func FromPublic(dest types.ImageDestination) private.ImageDestination { + if dest2, ok := dest.(private.ImageDestination); ok { + return dest2 + } + return &wrapped{ImageDestination: dest} +} + +// wrapped provides the private.ImageDestination operations +// for a destination that only implements types.ImageDestination +type wrapped struct { + types.ImageDestination +} + +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (w *wrapped) SupportsPutBlobPartial() bool { + return false +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) +} + +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { + return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name()) +} + +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) +} diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go new file mode 100644 index 00000000000..fe1be8d9eab --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go @@ -0,0 +1,47 @@ +package imagesource + +import ( + "context" + "fmt" + "io" + + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/types" +) + +// FromPublic(src) returns an object that provides the private.ImageSource API +// +// Eventually, we might want to expose this function, and methods of the returned object, +// as a public API (or rather, a variant that does not include the already-superseded +// methods of types.ImageSource, and has added more future-proofing), and more strongly +// deprecate direct use of types.ImageSource. +// +// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct +// with public methods, or perhaps a private interface), so that we can add methods +// without breaking any external implementors of a public interface. 
+func FromPublic(src types.ImageSource) private.ImageSource { + if src2, ok := src.(private.ImageSource); ok { + return src2 + } + return &wrapped{ImageSource: src} +} + +// wrapped provides the private.ImageSource operations +// for a source that only implements types.ImageSource +type wrapped struct { + types.ImageSource +} + +// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. +func (w *wrapped) SupportsGetBlobAt() bool { + return false +} + +// GetBlobAt returns a sequential channel of readers that contain data for the requested +// blob chunks, and a channel that might get a single error value. +// The specified chunks must be not overlapping and sorted by their offset. +// The readers must be fully consumed, in the order they are returned, before blocking +// to read the next chunk. +func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name()) +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go deleted file mode 100644 index bf6cc87d421..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// Key represents a single key linked to one or more kernel keyrings. -type Key struct { - Name string - - id, ring keyID - size int -} - -// ID returns the 32-bit kernel identifier for a specific key -func (k *Key) ID() int32 { - return int32(k.id) -} - -// Get the key's value as a byte slice -func (k *Key) Get() ([]byte, error) { - var ( - b []byte - err error - sizeRead int - ) - - if k.size == 0 { - k.size = 512 - } - - size := k.size - - b = make([]byte, int(size)) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - k.size = sizeRead - } - } - return b[:k.size], err -} - -// Unlink a key from the keyring it was loaded from (or added to). If the key -// is not linked to any other keyrings, it is destroyed. -func (k *Key) Unlink() error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) - return err -} - -// Describe returns a string describing the attributes of a specified key -func (k *Key) Describe() (string, error) { - keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) - if err != nil { - return "", err - } - return keyAttr, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go deleted file mode 100644 index 5eaad615c7c..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build linux -// +build linux - -// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) -package keyctl - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Keyring is the basic interface to a linux keyctl keyring. -type Keyring interface { - ID - Add(string, []byte) (*Key, error) - Search(string) (*Key, error) -} - -type keyring struct { - id keyID -} - -// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have. -type ID interface { - ID() int32 -} - -// Add a new key to a keyring. The key can be searched for later by name. -func (kr *keyring) Add(name string, key []byte) (*Key, error) { - r, err := unix.AddKey("user", name, key, int(kr.id)) - if err == nil { - key := &Key{Name: name, id: keyID(r), ring: kr.id} - return key, nil - } - return nil, err -} - -// Search for a key by name, this also searches child keyrings linked to this -// one. The key, if found, is linked to the top keyring that Search() was called -// from. -func (kr *keyring) Search(name string) (*Key, error) { - id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0) - if err == nil { - return &Key{Name: name, id: keyID(id), ring: kr.id}, nil - } - return nil, err -} - -// ID returns the 32-bit kernel identifier of a keyring -func (kr *keyring) ID() int32 { - return int32(kr.id) -} - -// SessionKeyring returns the current login session keyring -func SessionKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_SESSION_KEYRING) -} - -// UserKeyring returns the keyring specific to the current user. -func UserKeyring() (Keyring, error) { - return newKeyring(unix.KEY_SPEC_USER_KEYRING) -} - -// Unlink an object from a keyring -func Unlink(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// Link a key into a keyring -func Link(parent Keyring, child ID) error { - _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) - return err -} - -// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it -func ReadUserKeyring() ([]*Key, error) { - var ( - b []byte - err error - sizeRead int - ) - krSize := 4 - size := krSize - b = make([]byte, size) - sizeRead = size + 1 - for sizeRead > size { - r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) - if err != nil { - return nil, err - } - - if sizeRead = int(r1); sizeRead > size { - b = make([]byte, sizeRead) - size = sizeRead - sizeRead = size + 1 - } else { - krSize = sizeRead - } - } - keyIDs := getKeyIDsFromByte(b[:krSize]) - return keyIDs, err -} - -func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { - idSize := 4 - var keys []*Key - for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { - tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) - keys = append(keys, &Key{id: keyID(tempID)}) - } - return keys -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go deleted file mode 100644 index 5f4d2157ae9..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -// KeyPerm represents in-kernel access control permission to keys and keyrings -// as a 32-bit integer broken up into four permission sets, one per byte. -// In MSB order, the perms are: Processor, User, Group, Other. -type KeyPerm uint32 - -const ( - // PermOtherAll sets all permission for Other - PermOtherAll KeyPerm = 0x3f << (8 * iota) - // PermGroupAll sets all permission for Group - PermGroupAll - // PermUserAll sets all permission for User - PermUserAll - // PermProcessAll sets all permission for Processor - PermProcessAll -) - -// SetPerm sets the permissions on a key or keyring. -func SetPerm(k ID, p KeyPerm) error { - err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) - return err -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go deleted file mode 100644 index f61666e42c2..00000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 Jesse Sipprell. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package keyctl - -import ( - "golang.org/x/sys/unix" -) - -type keyID int32 - -func newKeyring(id keyID) (*keyring, error) { - r1, err := unix.KeyctlGetKeyringID(int(id), true) - if err != nil { - return nil, err - } - - if id < 0 { - r1 = int(id) - } - return &keyring{id: keyID(r1)}, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go new file mode 100644 index 00000000000..65788651fb5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -0,0 +1,106 @@ +package private + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +// ImageSource is an internal extension to the types.ImageSource interface. +type ImageSource interface { + types.ImageSource + + // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. + SupportsGetBlobAt() bool + // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). + BlobChunkAccessor +} + +// ImageDestination is an internal extension to the types.ImageDestination +// interface. +type ImageDestination interface { + types.ImageDestination + + // SupportsPutBlobPartial returns true if PutBlobPartial is supported. + SupportsPutBlobPartial() bool + + // PutBlobWithOptions writes contents of stream and returns data representing the result. + // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. + // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. + // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available + // to any other readers for download using the supplied digest. + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
+ PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error) + + // PutBlobPartial attempts to create a blob using the data that is already present + // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. + // It is available only if SupportsPutBlobPartial(). + // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller + // should fall back to PutBlobWithOptions. + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) + + // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination + // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). + // info.Digest must not be empty. + // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may + // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be + // reflected in the manifest that will be written. + // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. + TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error) +} + +// PutBlobOptions are used in PutBlobWithOptions. +type PutBlobOptions struct { + Cache types.BlobInfoCache // Cache to optionally update with the uploaded blob / look up blob infos. + IsConfig bool // True if the blob is a config + + // The following fields are new to internal/private. Users of internal/private MUST fill them in, + // but they also must expect that they will be ignored by types.ImageDestination transports. + + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. +} + +// TryReusingBlobOptions are used in TryReusingBlobWithOptions. +type TryReusingBlobOptions struct { + Cache types.BlobInfoCache // Cache to use and/or update. + // If true, it is allowed to use an equivalent of the desired blob; + // in that case the returned info may not match the input. + CanSubstitute bool + + // The following fields are new to internal/private. Users of internal/private MUST fill them in, + // but they also must expect that they will be ignored by types.ImageDestination transports. + + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. + SrcRef reference.Named // A reference to the source image that contains the input blob. +} + +// ImageSourceChunk is a portion of a blob. +// This API is experimental and can be changed without bumping the major version number. +type ImageSourceChunk struct { + Offset uint64 + Length uint64 +} + +// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface { + // GetBlobAt returns a sequential channel of readers that contain data for the requested + // blob chunks, and a channel that might get a single error value. + // The specified chunks must be not overlapping and sorted by their offset. + // The readers must be fully consumed, in the order they are returned, before blocking + // to read the next chunk. + GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) +} + +// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request. +type BadPartialRequestError struct { + Status string +} + +func (e BadPartialRequestError) Error() string { + return e.Status +} diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go deleted file mode 100644 index 388f8cf3b49..00000000000 --- a/vendor/github.com/containers/image/v5/internal/types/types.go +++ /dev/null @@ -1,91 +0,0 @@ -package types - -import ( - "context" - "io" - - "github.com/containers/image/v5/docker/reference" - publicTypes "github.com/containers/image/v5/types" -) - -// ImageDestinationWithOptions is an internal extension to the ImageDestination -// interface. -type ImageDestinationWithOptions interface { - publicTypes.ImageDestination - - // PutBlobWithOptions is a wrapper around PutBlob. If - // options.LayerIndex is set, the blob will be committed directly. - // Either by the calling goroutine or by another goroutine already - // committing layers. - // - // Please note that TryReusingBlobWithOptions and PutBlobWithOptions - // *must* be used the together. Mixing the two with non "WithOptions" - // functions is not supported. - PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error) - - // TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If - // options.LayerIndex is set, the reused blob will be recoreded as - // already pulled. - // - // Please note that TryReusingBlobWithOptions and PutBlobWithOptions - // *must* be used the together. Mixing the two with non "WithOptions" - // functions is not supported. - TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error) -} - -// PutBlobOptions are used in PutBlobWithOptions. -type PutBlobOptions struct { - // Cache to look up blob infos. - Cache publicTypes.BlobInfoCache - // Denotes whether the blob is a config or not. - IsConfig bool - // Indicates an empty layer. - EmptyLayer bool - // The corresponding index in the layer slice. - LayerIndex *int -} - -// TryReusingBlobOptions are used in TryReusingBlobWithOptions. -type TryReusingBlobOptions struct { - // Cache to look up blob infos. - Cache publicTypes.BlobInfoCache - // Use an equivalent of the desired blob. - CanSubstitute bool - // Indicates an empty layer. - EmptyLayer bool - // The corresponding index in the layer slice. - LayerIndex *int - // The reference of the image that contains the target blob. - SrcRef reference.Named -} - -// ImageSourceChunk is a portion of a blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceChunk struct { - Offset uint64 - Length uint64 -} - -// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob. 
-// This API is experimental and can be changed without bumping the major version number. -type ImageSourceSeekable interface { - // GetBlobAt returns a stream for the specified blob. - // The specified chunks must be not overlapping and sorted by their offset. - GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error) -} - -// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable. -// This API is experimental and can be changed without bumping the major version number. -type ImageDestinationPartial interface { - // PutBlobPartial writes contents of stream and returns data representing the result. - PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error) -} - -// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request. -type BadPartialRequestError struct { - Status string -} - -func (e BadPartialRequestError) Error() string { - return e.Status -} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 511cdcc37e5..20955ab7ff4 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -51,7 +51,7 @@ const ( // other than the ones the caller specifically allows. // expectedMIMEType is used only for diagnostics. // NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous // data that just isn’t what was expected, as opposed to actually ambiguous data. func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, allowed allowedManifestFields) error { @@ -71,7 +71,7 @@ func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, Manifests interface{} `json:"manifests"` }{} if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this shold not happen; + // The caller was supposed to already validate version numbers, so this should not happen; // let’s not bother with making this error “nice”. return err } diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 4b644f2539a..2e3e5da15ea 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -110,7 +110,8 @@ func GuessMIMEType(manifest []byte) string { } switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. + case DockerV2Schema2MediaType, DockerV2ListMediaType, + imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. return meta.MediaType } // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. 
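For orientation only, and not part of the vendored diff: a minimal sketch of how a caller inside containers/image might drive the new internal/private destination interfaces introduced above, preferring blob reuse, then a partial (chunked) upload, then a full upload. The copyLayer helper and its parameters are illustrative; the interfaces and option structs are the ones defined in internal/private/private.go, which is an internal package and only usable from within the containers/image module.

package copysketch

import (
    "context"
    "io"

    "github.com/containers/image/v5/internal/private"
    "github.com/containers/image/v5/types"
)

// copyLayer pushes one layer blob to dest, trying the cheapest path first:
// reuse, then a partial (chunked) upload, then a full upload.
func copyLayer(ctx context.Context, dest private.ImageDestination, src private.ImageSource,
    stream io.Reader, info types.BlobInfo, layerIndex int, cache types.BlobInfoCache) (types.BlobInfo, error) {
    // Reuse a blob the destination already has, possibly substituting an equivalent one.
    reused, reusedInfo, err := dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
        Cache:         cache,
        CanSubstitute: true,
        LayerIndex:    &layerIndex,
    })
    if err != nil {
        return types.BlobInfo{}, err
    }
    if reused {
        return reusedInfo, nil
    }
    // If both sides support chunked access, try assembling the blob from data
    // already present at the destination; a failure here is not fatal.
    if dest.SupportsPutBlobPartial() && src.SupportsGetBlobAt() {
        if uploaded, err := dest.PutBlobPartial(ctx, src, info, cache); err == nil {
            return uploaded, nil
        }
    }
    // Fall back to a full upload of the stream.
    return dest.PutBlobWithOptions(ctx, stream, info, private.PutBlobOptions{
        Cache:      cache,
        IsConfig:   false,
        LayerIndex: &layerIndex,
    })
}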
@@ -121,9 +122,9 @@ func GuessMIMEType(manifest []byte) string { } return DockerV2Schema1MediaType case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. + // Best effort to understand if this is an OCI image since mediaType + // wasn't in the manifest for OCI image-spec < 1.0.2. + // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. ociMan := struct { Config struct { MediaType string `json:"mediaType"` diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index c4616b96500..5892184df19 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -66,6 +66,7 @@ func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descript return &OCI1{ imgspecv1.Manifest{ Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, Config: config, Layers: layers, }, diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index 5bec43ff998..c4f11e09c56 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -119,6 +119,7 @@ func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[ index := OCI1Index{ imgspecv1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, Manifests: make([]imgspecv1.Descriptor, len(components)), Annotations: dupStringStringMap(annotations), }, @@ -195,6 +196,7 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { index := OCI1Index{ Index: imgspecv1.Index{ Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, Manifests: []imgspecv1.Descriptor{}, Annotations: make(map[string]string), }, diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index d0ee7263527..c8156cc3a9b 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -112,7 +112,7 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *ociImageDestination) HasThreadSafePutBlob() bool { - return false + return true } // PutBlob writes contents of stream and returns data representing the result. 
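A hedged illustration of the manifest changes above, not part of the diff: OCI1FromComponents now records MediaType, so GuessMIMEType can identify the serialized result from its explicit mediaType field instead of falling back to heuristics. The guessExample function name and the descriptor values are illustrative.

package manifestsketch

import (
    "fmt"

    "github.com/containers/image/v5/manifest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func guessExample() error {
    // OCI1FromComponents now fills in MediaType, so the serialized manifest
    // carries an explicit "mediaType" field.
    m := manifest.OCI1FromComponents(imgspecv1.Descriptor{
        MediaType: imgspecv1.MediaTypeImageConfig,
        Digest:    "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
        Size:      0,
    }, nil)
    serialized, err := m.Serialize()
    if err != nil {
        return err
    }
    // With the GuessMIMEType change above, this prints
    // application/vnd.oci.image.manifest.v1+json, taken straight from the mediaType field.
    fmt.Println(manifest.GuessMIMEType(serialized))
    return nil
}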
diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go new file mode 100644 index 00000000000..b67a83f33da --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/blobcache.go @@ -0,0 +1,539 @@ +package blobcache + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + digest "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + _ types.ImageReference = &BlobCache{} + _ types.ImageSource = &blobCacheSource{} + _ types.ImageDestination = &blobCacheDestination{} +) + +const ( + compressedNote = ".compressed" + decompressedNote = ".decompressed" +) + +// BlobCache is an object which saves copies of blobs that are written to it while passing them +// through to some real destination, and which can be queried directly in order to read them +// back. +// +// Implements types.ImageReference. +type BlobCache struct { + reference types.ImageReference + // WARNING: The contents of this directory may be accessed concurrently, + // both within this process and by multiple different processes + directory string + compress types.LayerCompression +} + +type blobCacheSource struct { + reference *BlobCache + source types.ImageSource + sys types.SystemContext + // this mutex synchronizes the counters below + mu sync.Mutex + cacheHits int64 + cacheMisses int64 + cacheErrors int64 +} + +type blobCacheDestination struct { + reference *BlobCache + destination types.ImageDestination +} + +func makeFilename(blobSum digest.Digest, isConfig bool) string { + if isConfig { + return blobSum.String() + ".config" + } + return blobSum.String() +} + +// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are +// written to the destination image created from the resulting reference will also be stored +// as-is to the specified directory or a temporary directory. +// The compress argument controls whether or not the cache will try to substitute a compressed +// or different version of a blob when preparing the list of layers when reading an image. 
+func NewBlobCache(ref types.ImageReference, directory string, compress types.LayerCompression) (*BlobCache, error) { + if directory == "" { + return nil, errors.Errorf("error creating cache around reference %q: no directory specified", transports.ImageName(ref)) + } + switch compress { + case types.Compress, types.Decompress, types.PreserveOriginal: + // valid value, accept it + default: + return nil, errors.Errorf("unhandled LayerCompression value %v", compress) + } + return &BlobCache{ + reference: ref, + directory: directory, + compress: compress, + }, nil +} + +func (b *BlobCache) Transport() types.ImageTransport { + return b.reference.Transport() +} + +func (b *BlobCache) StringWithinTransport() string { + return b.reference.StringWithinTransport() +} + +func (b *BlobCache) DockerReference() reference.Named { + return b.reference.DockerReference() +} + +func (b *BlobCache) PolicyConfigurationIdentity() string { + return b.reference.PolicyConfigurationIdentity() +} + +func (b *BlobCache) PolicyConfigurationNamespaces() []string { + return b.reference.PolicyConfigurationNamespaces() +} + +func (b *BlobCache) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return b.reference.DeleteImage(ctx, sys) +} + +func (b *BlobCache) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { + if blobinfo.Digest == "" { + return false, -1, nil + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(b.directory, makeFilename(blobinfo.Digest, isConfig)) + fileInfo, err := os.Stat(filename) + if err == nil && (blobinfo.Size == -1 || blobinfo.Size == fileInfo.Size()) { + return true, fileInfo.Size(), nil + } + if !os.IsNotExist(err) { + return false, -1, errors.Wrap(err, "checking size") + } + } + + return false, -1, nil +} + +func (b *BlobCache) Directory() string { + return b.directory +} + +func (b *BlobCache) ClearCache() error { + f, err := os.Open(b.directory) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + return errors.Wrapf(err, "error reading directory %q", b.directory) + } + for _, name := range names { + pathname := filepath.Join(b.directory, name) + if err = os.RemoveAll(pathname); err != nil { + return errors.Wrapf(err, "clearing cache for %q", transports.ImageName(b)) + } + } + return nil +} + +func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := b.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference)) + } + return image.FromSource(ctx, sys, src) +} + +func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + src, err := b.reference.NewImageSource(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image source %q", transports.ImageName(b.reference)) + } + logrus.Debugf("starting to read from image %q using blob cache in %q (compression=%v)", transports.ImageName(b.reference), b.directory, b.compress) + return &blobCacheSource{reference: b, source: src, sys: *sys}, nil +} + +func (b *BlobCache) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + dest, err := b.reference.NewImageDestination(ctx, sys) + if err != nil { + return nil, errors.Wrapf(err, "error creating new image destination %q", transports.ImageName(b.reference)) + } + logrus.Debugf("starting to write to image %q using blob 
cache in %q", transports.ImageName(b.reference), b.directory) + return &blobCacheDestination{reference: b, destination: dest}, nil +} + +func (s *blobCacheSource) Reference() types.ImageReference { + return s.reference +} + +func (s *blobCacheSource) Close() error { + logrus.Debugf("finished reading from image %q using blob cache: cache had %d hits, %d misses, %d errors", transports.ImageName(s.reference), s.cacheHits, s.cacheMisses, s.cacheErrors) + return s.source.Close() +} + +func (s *blobCacheSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + filename := filepath.Join(s.reference.directory, makeFilename(*instanceDigest, false)) + manifestBytes, err := ioutil.ReadFile(filename) + if err == nil { + s.cacheHits++ + return manifestBytes, manifest.GuessMIMEType(manifestBytes), nil + } + if !os.IsNotExist(err) { + s.cacheErrors++ + return nil, "", errors.Wrap(err, "checking for manifest file") + } + } + s.cacheMisses++ + return s.source.GetManifest(ctx, instanceDigest) +} + +func (s *blobCacheSource) HasThreadSafeGetBlob() bool { + return s.source.HasThreadSafeGetBlob() +} + +func (s *blobCacheSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + present, size, err := s.reference.HasBlob(blobinfo) + if err != nil { + return nil, -1, err + } + if present { + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(s.reference.directory, makeFilename(blobinfo.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + s.mu.Lock() + s.cacheHits++ + s.mu.Unlock() + return f, size, nil + } + if !os.IsNotExist(err) { + s.mu.Lock() + s.cacheErrors++ + s.mu.Unlock() + return nil, -1, errors.Wrap(err, "checking for cache") + } + } + } + s.mu.Lock() + s.cacheMisses++ + s.mu.Unlock() + rc, size, err := s.source.GetBlob(ctx, blobinfo, cache) + if err != nil { + return rc, size, errors.Wrapf(err, "error reading blob from source image %q", transports.ImageName(s.reference)) + } + return rc, size, nil +} + +func (s *blobCacheSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.source.GetSignatures(ctx, instanceDigest) +} + +func (s *blobCacheSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + signatures, err := s.source.GetSignatures(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error checking if image %q has signatures", transports.ImageName(s.reference)) + } + canReplaceBlobs := !(len(signatures) > 0 && len(signatures[0]) > 0) + + infos, err := s.source.LayerInfosForCopy(ctx, instanceDigest) + if err != nil { + return nil, errors.Wrapf(err, "error getting layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + if infos == nil { + img, err := image.FromUnparsedImage(ctx, &s.sys, image.UnparsedInstance(s.source, instanceDigest)) + if err != nil { + return nil, errors.Wrapf(err, "error opening image to get layer infos for copying image %q through cache", transports.ImageName(s.reference)) + } + infos = img.LayerInfos() + } + + if canReplaceBlobs && s.reference.compress != types.PreserveOriginal { + replacedInfos := make([]types.BlobInfo, 0, len(infos)) + for _, info := range infos { + var replaceDigest []byte + var err error + blobFile := filepath.Join(s.reference.directory, makeFilename(info.Digest, false)) + alternate := "" + switch s.reference.compress { + case 
types.Compress: + alternate = blobFile + compressedNote + replaceDigest, err = ioutil.ReadFile(alternate) + case types.Decompress: + alternate = blobFile + decompressedNote + replaceDigest, err = ioutil.ReadFile(alternate) + } + if err == nil && digest.Digest(replaceDigest).Validate() == nil { + alternate = filepath.Join(filepath.Dir(alternate), makeFilename(digest.Digest(replaceDigest), false)) + fileInfo, err := os.Stat(alternate) + if err == nil { + switch info.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip: + switch s.reference.compress { + case types.Compress: + info.MediaType = v1.MediaTypeImageLayerGzip + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + info.MediaType = v1.MediaTypeImageLayer + info.CompressionAlgorithm = nil + } + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, manifest.DockerV2Schema2LayerMediaType: + switch s.reference.compress { + case types.Compress: + info.MediaType = manifest.DockerV2Schema2LayerMediaType + info.CompressionAlgorithm = &compression.Gzip + case types.Decompress: + // nope, not going to suggest anything, it's not allowed by the spec + replacedInfos = append(replacedInfos, info) + continue + } + } + logrus.Debugf("suggesting cached blob with digest %q, type %q, and compression %v in place of blob with digest %q", string(replaceDigest), info.MediaType, s.reference.compress, info.Digest.String()) + info.CompressionOperation = s.reference.compress + info.Digest = digest.Digest(replaceDigest) + info.Size = fileInfo.Size() + logrus.Debugf("info = %#v", info) + } + } + replacedInfos = append(replacedInfos, info) + } + infos = replacedInfos + } + + return infos, nil +} + +func (d *blobCacheDestination) Reference() types.ImageReference { + return d.reference +} + +func (d *blobCacheDestination) Close() error { + logrus.Debugf("finished writing to image %q using blob cache", transports.ImageName(d.reference)) + return d.destination.Close() +} + +func (d *blobCacheDestination) SupportedManifestMIMETypes() []string { + return d.destination.SupportedManifestMIMETypes() +} + +func (d *blobCacheDestination) SupportsSignatures(ctx context.Context) error { + return d.destination.SupportsSignatures(ctx) +} + +func (d *blobCacheDestination) DesiredLayerCompression() types.LayerCompression { + return d.destination.DesiredLayerCompression() +} + +func (d *blobCacheDestination) AcceptsForeignLayerURLs() bool { + return d.destination.AcceptsForeignLayerURLs() +} + +func (d *blobCacheDestination) MustMatchRuntimeOS() bool { + return d.destination.MustMatchRuntimeOS() +} + +func (d *blobCacheDestination) IgnoresEmbeddedDockerReference() bool { + return d.destination.IgnoresEmbeddedDockerReference() +} + +// Decompress and save the contents of the decompressReader stream into the passed-in temporary +// file. If we successfully save all of the data, rename the file to match the digest of the data, +// and make notes about the relationship between the file that holds a copy of the compressed data +// and this new file. +func saveStream(wg *sync.WaitGroup, decompressReader io.ReadCloser, tempFile *os.File, compressedFilename string, compressedDigest digest.Digest, isConfig bool, alternateDigest *digest.Digest) { + defer wg.Done() + // Decompress from and digest the reading end of that pipe. 
+ decompressed, err3 := archive.DecompressStream(decompressReader) + digester := digest.Canonical.Digester() + if err3 == nil { + // Read the decompressed data through the filter over the pipe, blocking until the + // writing end is closed. + _, err3 = io.Copy(io.MultiWriter(tempFile, digester.Hash()), decompressed) + } else { + // Drain the pipe to keep from stalling the PutBlob() thread. + if _, err := io.Copy(ioutil.Discard, decompressReader); err != nil { + logrus.Debugf("error draining the pipe: %v", err) + } + } + decompressReader.Close() + decompressed.Close() + tempFile.Close() + // Determine the name that we should give to the uncompressed copy of the blob. + decompressedFilename := filepath.Join(filepath.Dir(tempFile.Name()), makeFilename(digester.Digest(), isConfig)) + if err3 == nil { + // Rename the temporary file. + if err3 = os.Rename(tempFile.Name(), decompressedFilename); err3 != nil { + logrus.Debugf("error renaming new decompressed copy of blob %q into place at %q: %v", digester.Digest().String(), decompressedFilename, err3) + // Remove the temporary file. + if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } else { + *alternateDigest = digester.Digest() + // Note the relationship between the two files. + if err3 = ioutils.AtomicWriteFile(decompressedFilename+compressedNote, []byte(compressedDigest.String()), 0600); err3 != nil { + logrus.Debugf("error noting that the compressed version of %q is %q: %v", digester.Digest().String(), compressedDigest.String(), err3) + } + if err3 = ioutils.AtomicWriteFile(compressedFilename+decompressedNote, []byte(digester.Digest().String()), 0600); err3 != nil { + logrus.Debugf("error noting that the decompressed version of %q is %q: %v", compressedDigest.String(), digester.Digest().String(), err3) + } + } + } else { + // Remove the temporary file. 
+ if err3 = os.Remove(tempFile.Name()); err3 != nil { + logrus.Debugf("error cleaning up temporary file %q for decompressed copy of blob %q: %v", tempFile.Name(), compressedDigest.String(), err3) + } + } +} + +func (d *blobCacheDestination) HasThreadSafePutBlob() bool { + return d.destination.HasThreadSafePutBlob() +} + +func (d *blobCacheDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + var tempfile *os.File + var err error + var n int + var alternateDigest digest.Digest + var closer io.Closer + wg := new(sync.WaitGroup) + needToWait := false + compression := archive.Uncompressed + if inputInfo.Digest != "" { + filename := filepath.Join(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + tempfile, err = ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err == nil { + stream = io.TeeReader(stream, tempfile) + defer func() { + if err == nil { + if err = os.Rename(tempfile.Name(), filename); err != nil { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + err = errors.Wrapf(err, "error renaming new layer for blob %q into place at %q", inputInfo.Digest.String(), filename) + } + } else { + if err2 := os.Remove(tempfile.Name()); err2 != nil { + logrus.Debugf("error cleaning up temporary file %q for blob %q: %v", tempfile.Name(), inputInfo.Digest.String(), err2) + } + } + tempfile.Close() + }() + } else { + logrus.Debugf("error while creating a temporary file under %q to hold blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err) + } + if !isConfig { + initial := make([]byte, 8) + n, err = stream.Read(initial) + if n > 0 { + // Build a Reader that will still return the bytes that we just + // read, for PutBlob()'s sake. + stream = io.MultiReader(bytes.NewReader(initial[:n]), stream) + if n >= len(initial) { + compression = archive.DetectCompression(initial[:n]) + } + if compression == archive.Gzip { + // The stream is compressed, so create a file which we'll + // use to store a decompressed copy. + decompressedTemp, err2 := ioutil.TempFile(d.reference.directory, makeFilename(inputInfo.Digest, isConfig)) + if err2 != nil { + logrus.Debugf("error while creating a temporary file under %q to hold decompressed blob %q: %v", d.reference.directory, inputInfo.Digest.String(), err2) + decompressedTemp.Close() + } else { + // Write a copy of the compressed data to a pipe, + // closing the writing end of the pipe after + // PutBlob() returns. + decompressReader, decompressWriter := io.Pipe() + closer = decompressWriter + stream = io.TeeReader(stream, decompressWriter) + // Let saveStream() close the reading end and handle the temporary file. 
+ wg.Add(1) + needToWait = true + go saveStream(wg, decompressReader, decompressedTemp, filename, inputInfo.Digest, isConfig, &alternateDigest) + } + } + } + } + } + newBlobInfo, err := d.destination.PutBlob(ctx, stream, inputInfo, cache, isConfig) + if closer != nil { + closer.Close() + } + if needToWait { + wg.Wait() + } + if err != nil { + return newBlobInfo, errors.Wrapf(err, "error storing blob to image destination for cache %q", transports.ImageName(d.reference)) + } + if alternateDigest.Validate() == nil { + logrus.Debugf("added blob %q (also %q) to the cache at %q", inputInfo.Digest.String(), alternateDigest.String(), d.reference.directory) + } else { + logrus.Debugf("added blob %q to the cache at %q", inputInfo.Digest.String(), d.reference.directory) + } + return newBlobInfo, nil +} + +func (d *blobCacheDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + present, reusedInfo, err := d.destination.TryReusingBlob(ctx, info, cache, canSubstitute) + if err != nil || present { + return present, reusedInfo, err + } + + for _, isConfig := range []bool{false, true} { + filename := filepath.Join(d.reference.directory, makeFilename(info.Digest, isConfig)) + f, err := os.Open(filename) + if err == nil { + defer f.Close() + uploadedInfo, err := d.destination.PutBlob(ctx, f, info, cache, isConfig) + if err != nil { + return false, types.BlobInfo{}, err + } + return true, uploadedInfo, nil + } + } + + return false, types.BlobInfo{}, nil +} + +func (d *blobCacheDestination) PutManifest(ctx context.Context, manifestBytes []byte, instanceDigest *digest.Digest) error { + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + logrus.Warnf("error digesting manifest %q: %v", string(manifestBytes), err) + } else { + filename := filepath.Join(d.reference.directory, makeFilename(manifestDigest, false)) + if err = ioutils.AtomicWriteFile(filename, manifestBytes, 0600); err != nil { + logrus.Warnf("error saving manifest as %q: %v", filename, err) + } + } + return d.destination.PutManifest(ctx, manifestBytes, instanceDigest) +} + +func (d *blobCacheDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + return d.destination.PutSignatures(ctx, signatures, instanceDigest) +} + +func (d *blobCacheDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + return d.destination.Commit(ctx, unparsedToplevel) +} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 63f5bd53e1b..1d73dc405e7 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -54,8 +54,8 @@ var ( // SetCredentials stores the username and password in a location // appropriate for sys and the users’ configuration. -// A valid key can be either a registry hostname or additionally a namespace if -// the AuthenticationFileHelper is being unsed. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. // Returns a human-redable description of the location that was updated. // NOTE: The return value is only intended to be read by humans; its form is not an API, // it may change (or new forms can be added) any time. 
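Not part of the diff: a usage sketch for the new pkg/blobcache package, wrapping an arbitrary image reference so blobs read through it are also stored in a local directory. The docker:// reference and the cache directory path are illustrative choices, not mandated by the code above.

package blobcachesketch

import (
    "context"
    "fmt"

    "github.com/containers/image/v5/pkg/blobcache"
    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/image/v5/types"
)

func cacheExample(ctx context.Context) error {
    ref, err := alltransports.ParseImageName("docker://quay.io/libpod/alpine:latest")
    if err != nil {
        return err
    }
    // Blobs read or written through this reference are also mirrored into the directory.
    cached, err := blobcache.NewBlobCache(ref, "/var/tmp/blob-cache", types.PreserveOriginal)
    if err != nil {
        return err
    }
    src, err := cached.NewImageSource(ctx, &types.SystemContext{})
    if err != nil {
        return err
    }
    defer src.Close()
    fmt.Println("caching blobs under", cached.Directory())
    return nil
}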
@@ -128,11 +128,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // possible sources, and then call `GetCredentials` on them. That // prevents us from having to reverse engineer the logic in // `GetCredentials`. - allRegistries := make(map[string]bool) - addRegistry := func(s string) { - allRegistries[s] = true + allKeys := make(map[string]bool) + addKey := func(s string) { + allKeys[s] = true } + // To use GetCredentials, we must at least convert the URL forms into host names. + // While we're at it, we’ll also canonicalize docker.io to the standard format. + normalizedDockerIORegistry := normalizeRegistry("docker.io") + helpers, err := sysregistriesv2.CredentialHelpers(sys) if err != nil { return nil, err @@ -151,10 +155,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // direct mapping to a registry, so we can just // walk the map. for registry := range auths.CredHelpers { - addRegistry(registry) + addKey(registry) } - for registry := range auths.AuthConfigs { - addRegistry(registry) + for key := range auths.AuthConfigs { + key := normalizeAuthFileKey(key, path.legacyFormat) + if key == normalizedDockerIORegistry { + key = "docker.io" + } + addKey(key) } } // External helpers. @@ -166,7 +174,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon switch errors.Cause(err) { case nil: for registry := range creds { - addRegistry(registry) + addKey(registry) } case exec.ErrNotFound: // It's okay if the helper doesn't exist. @@ -179,17 +187,15 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. authConfigs := make(map[string]types.DockerAuthConfig) - for registry := range allRegistries { - authConf, err := GetCredentials(sys, registry) + for key := range allKeys { + authConf, err := GetCredentials(sys, key) if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - // Ignore if the credentials could not be found (anymore). - continue - } // Note: we rely on the logging in `GetCredentials`. return nil, err } - authConfigs[registry] = authConf + if authConf != (types.DockerAuthConfig{}) { + authConfigs[key] = authConf + } } return authConfigs, nil @@ -230,16 +236,14 @@ func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { return paths } -// GetCredentials returns the registry credentials stored in the -// registry-specific credential helpers or in the default global credentials -// helpers with falling back to using either auth.json -// file or .docker/config.json, including support for OAuth2 and IdentityToken. +// GetCredentials returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. // If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. // -// GetCredentialsForRef should almost always be used in favor of this API to -// allow different credentials for different repositories on the same registry. -func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, nil, registry, homedir.Get()) +// GetCredentialsForRef should almost always be used in favor of this API. 
+func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, key, homedir.Get()) } // GetCredentialsForRef returns the registry credentials necessary for @@ -247,35 +251,39 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth // appropriate for sys and the users’ configuration. // If an entry is not found, an empty struct is returned. func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) { - return getCredentialsWithHomeDir(sys, ref, reference.Domain(ref), homedir.Get()) + return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get()) } // getCredentialsWithHomeDir is an internal implementation detail of // GetCredentialsForRef and GetCredentials. It exists only to allow testing it // with an artificial home directory. -func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, registry, homeDir string) (types.DockerAuthConfig, error) { - // consistency check of the ref and registry arguments - if ref != nil && reference.Domain(ref) != registry { - return types.DockerAuthConfig{}, errors.Errorf( - "internal error: provided reference domain %q name does not match registry %q", - reference.Domain(ref), registry, - ) +func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) { + _, err := validateKey(key) + if err != nil { + return types.DockerAuthConfig{}, err } if sys != nil && sys.DockerAuthConfig != nil { - logrus.Debugf("Returning credentials for %s from DockerAuthConfig", registry) + logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key) return *sys.DockerAuthConfig, nil } + var registry string // We compute this once because it is used in several places. + if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 { + registry = key[:firstSlash] + } else { + registry = key + } + // Anonymous function to query credentials from auth files. getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) { for _, path := range getAuthFilePaths(sys, homeDir) { - authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat) + authConfig, err := findCredentialsInFile(key, registry, path.path, path.legacyFormat) if err != nil { return types.DockerAuthConfig{}, "", err } - if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { + if authConfig != (types.DockerAuthConfig{}) { return authConfig, path.path, nil } } @@ -291,57 +299,61 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re for _, helper := range helpers { var ( creds types.DockerAuthConfig + helperKey string credHelperPath string err error ) switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: + helperKey = key creds, credHelperPath, err = getCredentialsFromAuthFiles() // External helpers. default: + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers, but a "registry" is a valid parent of "key". 
+ helperKey = registry creds, err = getAuthFromCredHelper(helper, registry) } if err != nil { - logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", registry, helper, err) + logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) multiErr = multierror.Append(multiErr, err) continue } - if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 { - continue - } - msg := fmt.Sprintf("Found credentials for %s in credential helper %s", registry, helper) - if credHelperPath != "" { - msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + if creds != (types.DockerAuthConfig{}) { + msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper) + if credHelperPath != "" { + msg = fmt.Sprintf("%s in file %s", msg, credHelperPath) + } + logrus.Debug(msg) + return creds, nil } - logrus.Debug(msg) - return creds, nil } if multiErr != nil { return types.DockerAuthConfig{}, multiErr } - logrus.Debugf("No credentials for %s found", registry) + logrus.Debugf("No credentials for %s found", key) return types.DockerAuthConfig{}, nil } -// GetAuthentication returns the registry credentials stored in the -// registry-specific credential helpers or in the default global credentials -// helpers with falling back to using either auth.json file or -// .docker/config.json +// GetAuthentication returns the registry credentials matching key, appropriate for +// sys and the users’ configuration. +// If an entry is not found, an empty struct is returned. +// A valid key is a repository, a namespace within a registry, or a registry hostname. // // Deprecated: This API only has support for username and password. To get the // support for oauth2 in container registry authentication, we added the new -// GetCredentials API. The new API should be used and this API is kept to +// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to // maintain backward compatibility. -func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { - return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) { + return getAuthenticationWithHomeDir(sys, key, homedir.Get()) } // getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, // it exists only to allow testing it with an artificial home directory. -func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) { - auth, err := getCredentialsWithHomeDir(sys, nil, registry, homeDir) +func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { + auth, err := getCredentialsWithHomeDir(sys, key, homeDir) if err != nil { return "", "", err } @@ -353,8 +365,8 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir st // RemoveAuthentication removes credentials for `key` from all possible // sources such as credential helpers and auth files. -// A valid key can be either a registry hostname or additionally a namespace if -// the AuthenticationFileHelper is being unsed. +// A valid key is a repository, a namespace within a registry, or a registry hostname; +// using forms other than just a registry may fail depending on configuration. 
func RemoveAuthentication(sys *types.SystemContext, key string) error { isNamespaced, err := validateKey(key) if err != nil { @@ -606,6 +618,10 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) if err != nil { + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper) + err = nil + } return types.DockerAuthConfig{}, err } @@ -639,26 +655,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error { return helperclient.Erase(p, registry) } -// findAuthentication looks for auth of registry in path. If ref is -// not nil, then it will be taken into account when looking up the -// authentication credentials. -func findAuthentication(ref reference.Named, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { +// findCredentialsInFile looks for credentials matching "key" +// (which is "registry" or a namespace in "registry") in "path". +func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { auths, err := readJSONFile(path, legacyFormat) if err != nil { return types.DockerAuthConfig{}, errors.Wrapf(err, "reading JSON file %q", path) } // First try cred helpers. They should always be normalized. + // This intentionally uses "registry", not "key"; we don't support namespaced + // credentials in helpers. if ch, exists := auths.CredHelpers[registry]; exists { + logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path) return getAuthFromCredHelper(ch, registry) } - // Support for different paths in auth. + // Support sub-registry namespaces in auth. // (This is not a feature of ~/.docker/config.json; we support it even for // those files as an extension.) var keys []string - if !legacyFormat && ref != nil { - keys = authKeysForRef(ref) + if !legacyFormat { + keys = authKeysForKey(key) } else { keys = []string{registry} } @@ -686,26 +704,28 @@ func findAuthentication(ref reference.Named, registry, path string, legacyFormat } } + // Only log this if we found nothing; getCredentialsWithHomeDir logs the + // source of found data. + logrus.Debugf("No credentials matching %s found in %s", key, path) return types.DockerAuthConfig{}, nil } -// authKeysForRef returns the valid paths for a provided reference. For example, -// when given a reference "quay.io/repo/ns/image:tag", then it would return +// authKeysForKey returns the keys matching a provided auth file key, in order +// from the best match to worst. For example, +// when given a repository key "quay.io/repo/ns/image", it returns // - quay.io/repo/ns/image // - quay.io/repo/ns // - quay.io/repo // - quay.io -func authKeysForRef(ref reference.Named) (res []string) { - name := ref.Name() - +func authKeysForKey(key string) (res []string) { for { - res = append(res, name) + res = append(res, key) - lastSlash := strings.LastIndex(name, "/") + lastSlash := strings.LastIndex(key, "/") if lastSlash == -1 { break } - name = name[:lastSlash] + key = key[:lastSlash] } return res @@ -759,11 +779,24 @@ func normalizeRegistry(registry string) string { // validateKey verifies that the input key does not have a prefix that is not // allowed and returns an indicator if the key is namespaced. 
-func validateKey(key string) (isNamespaced bool, err error) { +func validateKey(key string) (bool, error) { if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") { - return isNamespaced, errors.Errorf("key %s contains http[s]:// prefix", key) + return false, errors.Errorf("key %s contains http[s]:// prefix", key) } + // Ideally this should only accept explicitly valid keys, compare + // validateIdentityRemappingPrefix. For now, just reject values that look + // like tagged or digested values. + if strings.ContainsRune(key, '@') { + return false, fmt.Errorf(`key %s contains a '@' character`, key) + } + + firstSlash := strings.IndexRune(key, '/') + isNamespaced := firstSlash != -1 + // Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo. + if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') { + return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key) + } // check if the provided key contains one or more subpaths. - return strings.ContainsRune(key, '/'), nil + return isNamespaced, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go deleted file mode 100644 index 0bf16125919..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go +++ /dev/null @@ -1,119 +0,0 @@ -package config - -import ( - "fmt" - "strings" - - "github.com/containers/image/v5/internal/pkg/keyctl" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// NOTE: none of the functions here are currently used. If we ever want to -// re-enable keyring support, we should introduce a similar built-in credential -// helpers as for `sysregistriesv2.AuthenticationFileHelper`. 
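An illustrative caller of the reworked credential lookup, not part of the diff: GetCredentials now accepts a repository or namespace key and, per authKeysForKey above, falls back from quay.io/repo/ns/image through quay.io/repo/ns and quay.io/repo down to quay.io. The credsketch package and its output strings are ours.

package credsketch

import (
    "fmt"

    dockerconfig "github.com/containers/image/v5/pkg/docker/config"
    "github.com/containers/image/v5/types"
)

func lookupExample() error {
    sys := &types.SystemContext{}
    // A repository-scoped key; plain registry keys such as "quay.io" keep working.
    auth, err := dockerconfig.GetCredentials(sys, "quay.io/repo/ns/image")
    if err != nil {
        return err
    }
    if auth == (types.DockerAuthConfig{}) {
        fmt.Println("no credentials found for the repository or any parent namespace")
        return nil
    }
    fmt.Println("found credentials for user", auth.Username)
    return nil
}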
- -const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return "", "", err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return "", "", err - } - authData, err := key.Get() - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(authData), "\x00", 2) - if len(parts) != 2 { - return "", "", nil - } - return parts[0], parts[1], nil -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - userkeyring, err := keyctl.UserKeyring() - - if err != nil { - return err - } - key, err := userkeyring.Search(genDescription(registry)) - if err != nil { - return err - } - return key.Unlink() -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - keys, err := keyctl.ReadUserKeyring() - if err != nil { - return err - } - - userkeyring, err := keyctl.UserKeyring() - if err != nil { - return err - } - - for _, k := range keys { - keyAttr, err := k.Describe() - if err != nil { - return err - } - // split string "type;uid;gid;perm;description" - keyAttrs := strings.SplitN(keyAttr, ";", 5) - if len(keyAttrs) < 5 { - return errors.Errorf("Key attributes of %d are not available", k.ID()) - } - keyDescribe := keyAttrs[4] - if strings.HasPrefix(keyDescribe, keyDescribePrefix) { - err := keyctl.Unlink(userkeyring, k) - if err != nil { - return errors.Wrapf(err, "unlinking key %d", k.ID()) - } - logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr) - } - } - return nil -} - -func setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - keyring, err := keyctl.SessionKeyring() - if err != nil { - return err - } - id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) - if err != nil { - return err - } - - // sets all permission(view,read,write,search,link,set attribute) for current user - // it enables the user to search the key after it linked to user keyring and unlinked from session keyring - err = keyctl.SetPerm(id, keyctl.PermUserAll) - if err != nil { - return err - } - // link the key to userKeyring - userKeyring, err := keyctl.UserKeyring() - if err != nil { - return errors.Wrapf(err, "getting user keyring") - } - err = keyctl.Link(userKeyring, id) - if err != nil { - return errors.Wrapf(err, "linking the key to user keyring") - } - // unlink the key from session keyring - err = keyctl.Unlink(keyring, id) - if err != nil { - return errors.Wrapf(err, "unlinking the key from session keyring") - } - return nil -} - -func genDescription(registry string) string { //nolint:deadcode,unused - return fmt.Sprintf("%s%s", keyDescribePrefix, registry) -} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go deleted file mode 100644 index d9827d8edbc..00000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !linux && (!386 || !amd64) -// +build !linux -// +build !386 !amd64 - -package config - -func getAuthFromKernelKeyring(registry string) (string, string, error) { //nolint:deadcode,unused - return "", "", ErrNotSupported -} - -func deleteAuthFromKernelKeyring(registry string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func 
setAuthToKernelKeyring(registry, username, password string) error { //nolint:deadcode,unused - return ErrNotSupported -} - -func removeAllAuthFromKernelKeyring() error { //nolint:deadcode,unused - return ErrNotSupported -} diff --git a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go index fb0a15b993e..46c10ff631a 100644 --- a/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go @@ -118,6 +118,7 @@ type Resolved struct { } func (r *Resolved) addCandidate(named reference.Named) { + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed r.PullCandidates = append(r.PullCandidates, PullCandidate{named, false, r}) } @@ -138,6 +139,8 @@ const ( rationaleUSR // Resolved value has been selected by the user (via the prompt). rationaleUserSelection + // Resolved value has been enforced to use Docker Hub (via SystemContext). + rationaleEnforcedDockerHub ) // Description returns a human-readable description about the resolution @@ -152,6 +155,8 @@ func (r *Resolved) Description() string { return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription) case rationaleUSR: return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription) + case rationaleEnforcedDockerHub: + return fmt.Sprintf("Resolving %q to docker.io (%s)", r.userInput, r.originDescription) case rationaleUserSelection, rationaleNone: fallthrough default: @@ -265,8 +270,20 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } if !isShort { // no short name - named := reference.TagNameOnly(shortRef) // Make sure to add ":latest" if needed + resolved.addCandidate(shortRef) + return resolved, nil + } + + // Resolve to docker.io only if enforced by the caller (e.g., Podman's + // Docker-compatible REST API). + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "cannot normalize input: %q", name) + } resolved.addCandidate(named) + resolved.rationale = rationaleEnforcedDockerHub + resolved.originDescription = "enforced by caller" return resolved, nil } @@ -295,9 +312,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - resolved.addCandidate(namedAlias) resolved.rationale = rationaleAlias resolved.originDescription = aliasOriginDescription @@ -325,9 +339,6 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) { if err != nil { return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - resolved.addCandidate(named) } @@ -412,6 +423,23 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e var candidates []reference.Named + // Complete the candidates with the specified registries. 
+ completeCandidates := func(registries []string) ([]reference.Named, error) { + for _, reg := range registries { + named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) + if err != nil { + return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) + } + named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed + candidates = append(candidates, named) + } + return candidates, nil + } + + if ctx != nil && ctx.PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub { + return completeCandidates([]string{"docker.io"}) + } + // Strip off the tag to normalize the short name for looking it up in // the config files. isTagged, isDigested, shortNameRepo, tag, digest := splitUserInput(shortRef) @@ -434,9 +462,7 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e return nil, err } } - // Make sure to add ":latest" if needed - namedAlias = reference.TagNameOnly(namedAlias) - + namedAlias = reference.TagNameOnly(namedAlias) // Make sure to add ":latest" if needed candidates = append(candidates, namedAlias) } @@ -447,16 +473,5 @@ func ResolveLocally(ctx *types.SystemContext, name string) ([]reference.Named, e } // Note that "localhost" has precedence over the unqualified-search registries. - for _, reg := range append([]string{"localhost"}, unqualifiedSearchRegistries...) { - named, err := reference.ParseNormalizedNamed(fmt.Sprintf("%s/%s", reg, name)) - if err != nil { - return nil, errors.Wrapf(err, "creating reference with unqualified-search registry %q", reg) - } - // Make sure to add ":latest" if needed - named = reference.TagNameOnly(named) - - candidates = append(candidates, named) - } - - return candidates, nil + return completeCandidates(append([]string{"localhost"}, unqualifiedSearchRegistries...)) } diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go new file mode 100644 index 00000000000..ba6d875bae6 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/load.go @@ -0,0 +1,211 @@ +package sif + +import ( + "bufio" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +// injectedScriptTargetPath is the path injectedScript should be written to in the created image. +const injectedScriptTargetPath = "/podman/runscript" + +// parseDefFile parses a SIF definition file from reader, +// and returns non-trivial contents of the %environment and %runscript sections. 
+func parseDefFile(reader io.Reader) ([]string, []string, error) { + type parserState int + const ( + parsingOther parserState = iota + parsingEnvironment + parsingRunscript + ) + + environment := []string{} + runscript := []string{} + + state := parsingOther + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + switch { + case s == `%environment`: + state = parsingEnvironment + case s == `%runscript`: + state = parsingRunscript + case strings.HasPrefix(s, "%"): + state = parsingOther + case state == parsingEnvironment: + if s != "" && !strings.HasPrefix(s, "#") { + environment = append(environment, s) + } + case state == parsingRunscript: + runscript = append(runscript, s) + default: // parsingOther: ignore the line + } + } + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err) + } + return environment, runscript, nil +} + +// generateInjectedScript generates a shell script based on +// SIF definition file %environment and %runscript data, and returns it. +func generateInjectedScript(environment []string, runscript []string) []byte { + script := fmt.Sprintf("#!/bin/bash\n"+ + "%s\n"+ + "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n")) + return []byte(script) +} + +// processDefFile finds sif.DataDeffile in sifImage, if any, +// and returns: +// - the command to run +// - contents of a script to inject as injectedScriptTargetPath, or nil +func processDefFile(sifImage *sif.FileImage) (string, []byte, error) { + var environment, runscript []string + + desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile)) + if err == nil { + environment, runscript, err = parseDefFile(desc.GetReader()) + if err != nil { + return "", nil, err + } + } + + var command string + var injectedScript []byte + if len(environment) == 0 && len(runscript) == 0 { + command = "bash" + injectedScript = nil + } else { + injectedScript = generateInjectedScript(environment, runscript) + command = injectedScriptTargetPath + } + + return command, injectedScript, nil +} + +func writeInjectedScript(extractedRootPath string, injectedScript []byte) error { + if injectedScript == nil { + return nil + } + filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath) + parentDirPath := filepath.Dir(filePath) + if err := os.MkdirAll(parentDirPath, 0755); err != nil { + return fmt.Errorf("creating %s: %w", parentDirPath, err) + } + if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil { + return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err) + } + return nil +} + +// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath. +// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use, +// if necessary. +func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error { + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + defer os.RemoveAll(extractedRootPath) + + // Almost everything in extractedRootPath comes from squashFSPath. 
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./", + extractedRootPath, squashFSPath, extractedRootPath, tarPath) + script := "#!/bin/sh\n" + conversionCommand + "\n" + if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil { + return err + } + defer os.Remove(scriptPath) + + // On top of squashFSPath, we only add injectedScript, if necessary. + if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil { + return err + } + + logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand) + cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("converting image: %w, output: %s", err, string(output)) + } + logrus.Debugf("... finished converting squashfs to tar") + return nil +} + +// convertSIFToElements processes sifImage and creates/returns +// the relevant elements for constructing an OCI-like image: +// - A path to a tar file containing a root filesystem, +// - A command to run. +// The returned tar file path is inside tempDir, which can be assumed to be empty +// at start, and is exclusively used by the current process (i.e. it is safe +// to use hard-coded relative paths within it). +func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) { + // We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive, + // so we can just hard-code a set of unique values here. + // We create and/or manage cleanup of these two paths. + squashFSPath := filepath.Join(tempDir, "rootfs.squashfs") + tarPath := filepath.Join(tempDir, "rootfs.tar") + // We only allocate these paths, the user is responsible for cleaning them up. + extractedRootPath := filepath.Join(tempDir, "rootfs") + scriptPath := filepath.Join(tempDir, "script") + + succeeded := false + // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive + // for our use. + // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need + // to run both creation and consumption of extractedRootPath in the same fakeroot context. + // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2 + // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time. + // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser + // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy. + defer os.Remove(squashFSPath) + defer func() { + if !succeeded { + os.Remove(tarPath) + } + }() + + command, injectedScript, err := processDefFile(sifImage) + if err != nil { + return "", nil, err + } + + rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)) + if err != nil { + return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err) + } + // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4 + // has an -o option that allows extracting a squashfs from the SIF file directly, + // but that version is not currently available in RHEL 8. 
+ logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath) + if err := func() error { // A scope for defer + f, err := os.Create(squashFSPath) + if err != nil { + return err + } + defer f.Close() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil { + return err + } + return nil + }(); err != nil { + return "", nil, err + } + logrus.Debugf("... finished creating a temporary squashfs image") + + if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil { + return "", nil, err + } + succeeded = true + return tarPath, []string{command}, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go new file mode 100644 index 00000000000..ba95a469f36 --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -0,0 +1,217 @@ +package sif + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/sylabs/sif/v2/pkg/sif" +) + +type sifImageSource struct { + ref sifReference + workDir string + layerDigest digest.Digest + layerSize int64 + layerFile string + config []byte + configDigest digest.Digest + manifest []byte +} + +// getBlobInfo returns the digest, and size of the provided file. +func getBlobInfo(path string) (digest.Digest, int64, error) { + f, err := os.Open(path) + if err != nil { + return "", -1, fmt.Errorf("opening %q for reading: %w", path, err) + } + defer f.Close() + + // TODO: Instead of writing the tar file to disk, and reading + // it here again, stream the tar file to a pipe and + // compute the digest while writing it to disk. + logrus.Debugf("Computing a digest of the SIF conversion output...") + digester := digest.Canonical.Digester() + // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + size, err := io.Copy(digester.Hash(), f) + if err != nil { + return "", -1, fmt.Errorf("reading %q: %w", path, err) + } + digest := digester.Digest() + logrus.Debugf("... finished computing the digest of the SIF conversion output") + + return digest, size, nil +} + +// newImageSource returns an ImageSource for reading from an existing directory. +// newImageSource extracts SIF objects and saves them in a temp directory. 
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) { + sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY)) + if err != nil { + return nil, fmt.Errorf("loading SIF file: %w", err) + } + defer func() { + _ = sifImg.UnloadContainer() + }() + + workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + if err != nil { + return nil, fmt.Errorf("creating temp directory: %w", err) + } + succeeded := false + defer func() { + if !succeeded { + os.RemoveAll(workDir) + } + }() + + layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir) + if err != nil { + return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err) + } + + layerDigest, layerSize, err := getBlobInfo(layerPath) + if err != nil { + return nil, fmt.Errorf("gathering blob information: %w", err) + } + + created := sifImg.ModifiedAt() + config := imgspecv1.Image{ + Created: &created, + Architecture: sifImg.PrimaryArch(), + OS: "linux", + Config: imgspecv1.ImageConfig{ + Cmd: commandLine, + }, + RootFS: imgspecv1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{layerDigest}, + }, + History: []imgspecv1.History{ + { + Created: &created, + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + Comment: "imported from SIF, uuid: " + sifImg.ID(), + }, + { + Created: &created, + CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]", + EmptyLayer: true, + }, + }, + } + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err) + } + configDigest := digest.Canonical.FromBytes(configBytes) + + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: imgspecv1.Descriptor{ + Digest: configDigest, + Size: int64(len(configBytes)), + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: []imgspecv1.Descriptor{{ + Digest: layerDigest, + Size: layerSize, + MediaType: imgspecv1.MediaTypeImageLayer, + }}, + } + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err) + } + + succeeded = true + return &sifImageSource{ + ref: ref, + workDir: workDir, + layerDigest: layerDigest, + layerSize: layerSize, + layerFile: layerPath, + config: configBytes, + configDigest: configDigest, + manifest: manifestBytes, + }, nil +} + +// Reference returns the reference used to set up this source. +func (s *sifImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *sifImageSource) Close() error { + return os.RemoveAll(s.workDir) +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *sifImageSource) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + switch info.Digest { + case s.configDigest: + return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil + case s.layerDigest: + reader, err := os.Open(s.layerFile) + if err != nil { + return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err) + } + return reader, s.layerSize, nil + default: + return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String()) + } +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.New("manifest lists are not supported by the sif transport") + } + return s.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by the sif transport") + } + return nil, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go new file mode 100644 index 00000000000..18d894bc35c --- /dev/null +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -0,0 +1,164 @@ +package sif + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strings" + + "github.com/containers/image/v5/directory/explicitfilepath" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for SIF images. 
+var Transport = sifTransport{} + +type sifTransport struct{} + +func (t sifTransport) Name() string { + return "sif" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) { + return NewReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error { + if !strings.HasPrefix(scope, "/") { + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) + } + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } + return nil +} + +// sifReference is an ImageReference for SIF images. +type sifReference struct { + // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! + // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on. + + // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid + // being exposed to symlinks and renames in the parent directories to the working directory). + // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) + file string // As specified by the user. May be relative, contain symlinks, etc. + resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. +} + +// There is no sif.ParseReference because it is rather pointless. +// Callers who need a transport-independent interface will go through +// sifTransport.ParseReference; callers who intentionally deal with SIF files +// can use sif.NewReference. + +// NewReference returns an image file reference for a specified path. +func NewReference(file string) (types.ImageReference, error) { + // We do not expose an API supplying the resolvedFile; we could, but recomputing it + // is generally cheap enough that we prefer being confident about the properties of resolvedFile. + resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) + if err != nil { + return nil, err + } + return sifReference{file: file, resolvedFile: resolved}, nil +} + +func (ref sifReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref sifReference) StringWithinTransport() string { + return ref.file +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref sifReference) DockerReference() reference.Named { + return nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref sifReference) PolicyConfigurationIdentity() string { + return ref.resolvedFile +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref sifReference) PolicyConfigurationNamespaces() []string { + res := []string{} + path := ref.resolvedFile + for { + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 || lastSlash == 0 { + break + } + path = path[:lastSlash] + res = append(res, path) + } + // Note that we do not include "/"; it is redundant with the default "" global default, + // and rejected by sifTransport.ValidatePolicyConfigurationScope above. + return res +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. 
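As an editorial sketch (not part of the vendored code), reading a SIF file through this transport comes down to the two constructors above; the transport is read-only, the file path and helper name are placeholders, and the usual context, sif and types imports are assumed:

	func readSIFManifest(ctx context.Context, sys *types.SystemContext) ([]byte, string, error) {
		ref, err := sif.NewReference("/tmp/example.sif") // placeholder path; "sif:" references are plain file paths
		if err != nil {
			return nil, "", err
		}
		src, err := ref.NewImageSource(ctx, sys)
		if err != nil {
			return nil, "", err
		}
		defer src.Close()
		return src.GetManifest(ctx, nil)
	}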
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return nil, errors.New(`"sif:" locations can only be read from, not written to`) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + return errors.New("Deleting images not implemented for sif: images") +} diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go index 07fdd42a9af..8e9ce0dd236 100644 --- a/vendor/github.com/containers/image/v5/signature/docker.go +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -3,22 +3,46 @@ package signature import ( + "errors" "fmt" + "strings" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/opencontainers/go-digest" ) +// SignOptions includes optional parameters for signing container images. +type SignOptions struct { + // Passphare to use when signing with the key identity. + Passphrase string +} + // SignDockerManifest returns a signature for manifest as the specified dockerReference, -// using mech and keyIdentity. -func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { +// using mech and keyIdentity, and the specified options. +func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) { manifestDigest, err := manifest.Digest(m) if err != nil { return nil, err } sig := newUntrustedSignature(manifestDigest, dockerReference) - return sig.sign(mech, keyIdentity) + + var passphrase string + if options != nil { + passphrase = options.Passphrase + // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior. + if strings.Contains(passphrase, "\n") { + return nil, errors.New("invalid passphrase: must not contain a line break") + } + } + + return sig.sign(mech, keyIdentity, passphrase) +} + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil) } // VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go index 2c08c231ea8..9a32a43640e 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism.go @@ -9,13 +9,15 @@ import ( "io/ioutil" "strings" - "golang.org/x/crypto/openpgp" + // This code is used only to parse the data in an explicitly-untrusted + // code path, where cryptography is not relevant. For now, continue to + // use this frozen deprecated implementation. When mechanism_openpgp.go + // migrates to another implementation, this should migrate as well. + "golang.org/x/crypto/openpgp" //nolint:staticcheck ) // SigningMechanism abstracts a way to sign binary blobs and verify their signatures. // Each mechanism should eventually be closed by calling Close(). 
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to -// eliminate ambiguities, support CA signatures and perhaps other key properties) type SigningMechanism interface { // Close removes resources associated with the mechanism, if any. Close() error @@ -34,6 +36,15 @@ type SigningMechanism interface { UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) } +// signingMechanismWithPassphrase is an internal extension of SigningMechanism. +type signingMechanismWithPassphrase interface { + SigningMechanism + + // Sign creates a (non-detached) signature of input using keyIdentity and passphrase. + // Fails with a SigningNotSupportedError if the mechanism does not support signing. + SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) +} + // SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. type SigningNotSupportedError string diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index 6ae74d430d6..c166fb32d89 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -5,11 +5,12 @@ package signature import ( "bytes" + "errors" "fmt" "io/ioutil" "os" - "github.com/mtrmac/gpgme" + "github.com/proglottis/gpgme" ) // A GPG/OpenPGP signing mechanism, implemented using gpgme. @@ -20,7 +21,7 @@ type gpgmeSigningMechanism struct { // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. // The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { ctx, err := newGPGMEContext(optionalDir) if err != nil { return nil, err @@ -35,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") if err != nil { return nil, nil, err @@ -117,9 +118,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error { return nil } -// Sign creates a (non-detached) signature of input using keyIdentity. +// Sign creates a (non-detached) signature of input using keyIdentity and passphrase. // Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { +func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { key, err := m.ctx.GetKey(keyIdentity, true) if err != nil { return nil, err @@ -133,12 +134,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, if err != nil { return nil, err } + + if passphrase != "" { + // Callback to write the passphrase to the specified file descriptor. 
+ callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error { + if prevWasBad { + return errors.New("bad passphrase") + } + _, err := gpgmeFD.WriteString(passphrase + "\n") + return err + } + if err := m.ctx.SetCallback(callback); err != nil { + return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err) + } + + // Loopback mode will use the callback instead of prompting the user. + if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil { + return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err) + } + } + if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { return nil, err } return sigBuffer.Bytes(), nil } +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + // Verify parses unverifiedSignature and returns the content and the signer's identity func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { signedBuffer := bytes.Buffer{} diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index 0a09788f989..7a31425f199 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -14,7 +14,13 @@ import ( "time" "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" + // This is a fallback code; the primary recommendation is to use the gpgme mechanism + // implementation, which is out-of-process and more appropriate for handling long-term private key material + // than any Go implementation. + // For this verify-only fallback, we haven't reviewed any of the + // existing alternatives to choose; so, for now, continue to + // use this frozen deprecated implementation. + "golang.org/x/crypto/openpgp" //nolint:staticcheck ) // A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. @@ -24,7 +30,7 @@ type openpgpSigningMechanism struct { // newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. // The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { +func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } @@ -55,7 +61,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er // recognizes _only_ public keys from the supplied blob, and returns the identities // of these keys. // The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { +func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) { m := &openpgpSigningMechanism{ keyring: openpgp.EntityList{}, } @@ -104,10 +110,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error { // Sign creates a (non-detached) signature of input using keyIdentity. // Fails with a SigningNotSupportedError if the mechanism does not support signing. 
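As an editorial sketch of how the passphrase support added above is meant to be used (not vendored code; the manifest reference, key fingerprint, and passphrase are placeholders, and NewGPGSigningMechanism is the pre-existing constructor for the default mechanism):

	func signManifest(manifestBlob []byte) ([]byte, error) {
		mech, err := signature.NewGPGSigningMechanism()
		if err != nil {
			return nil, err
		}
		defer mech.Close()
		// With the gpgme build, the passphrase is fed to gpg through the loopback
		// pinentry callback added above; passphrases containing a line break are
		// rejected before signing starts.
		return signature.SignDockerManifestWithOptions(manifestBlob, "docker.io/library/example:latest",
			mech, "0123456789ABCDEF01234567", &signature.SignOptions{Passphrase: "placeholder-passphrase"})
	}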
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { +func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) { return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") } +// Sign creates a (non-detached) signature of input using keyIdentity. +// Fails with a SigningNotSupportedError if the mechanism does not support signing. +func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { + return m.SignWithPassphrase(input, keyIdentity, "") +} + // Verify parses unverifiedSignature and returns the content and the signer's identity func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go index 09f4f85e0a8..05bf8229e7d 100644 --- a/vendor/github.com/containers/image/v5/signature/signature.go +++ b/vendor/github.com/containers/image/v5/signature/signature.go @@ -190,12 +190,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { // of the system just because it is a private key — actually the presence of a private key // on the system increases the likelihood of an a successful attack on that private key // on that particular system.) -func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) { +func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) { json, err := json.Marshal(s) if err != nil { return nil, err } + if newMech, ok := mech.(signingMechanismWithPassphrase); ok { + return newMech.SignWithPassphrase(json, keyIdentity, passphrase) + } + + if passphrase != "" { + return nil, errors.New("signing mechanism does not support passphrases") + } + return mech.Sign(json, keyIdentity) } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 7329ef6eee0..bcb09c83ce8 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -18,9 +18,9 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" - internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" @@ -446,15 +446,20 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) } -// PutBlobWithOptions is a wrapper around PutBlob. If options.LayerIndex is -// set, the blob will be committed directly. Either by the calling goroutine -// or by another goroutine already committing layers. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with non "WithOptions" functions is not -// supported. 
-func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options internalTypes.PutBlobOptions) (types.BlobInfo, error) { - info, err := s.PutBlob(ctx, stream, blobinfo, options.Cache, options.IsConfig) +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. +func (s *storageImageDestination) HasThreadSafePutBlob() bool { + return true +} + +// PutBlobWithOptions writes contents of stream and returns data representing the result. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// inputInfo.MediaType describes the blob format, if known. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { + info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options) if err != nil { return info, err } @@ -466,11 +471,6 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) } -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (s *storageImageDestination) HasThreadSafePutBlob() bool { - return true -} - // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. @@ -480,6 +480,15 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + return s.PutBlobWithOptions(ctx, stream, blobinfo, private.PutBlobOptions{ + Cache: cache, + IsConfig: isConfig, + }) +} + +// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. +// The caller must arrange the blob to be eventually commited using s.commitLayer(). +func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { // Stores a layer or data blob in our temporary directory, checking that any information // in the blobinfo matches the incoming data. errorBlobInfo := types.BlobInfo{ @@ -534,7 +543,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). 
- cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) + options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) return types.BlobInfo{ Digest: blobDigest, Size: blobSize, @@ -542,80 +551,40 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, }, nil } -// TryReusingBlobWithOptions is a wrapper around TryReusingBlob. If -// options.LayerIndex is set, the reused blob will be recoreded as already -// pulled. -// -// Please not that TryReusingBlobWithOptions and PutBlobWithOptions *must* be -// used the together. Mixing the two with the non "WithOptions" functions -// is not supported. -func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options internalTypes.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - reused, info, err := s.tryReusingBlobWithSrcRef(ctx, blobinfo, options.Cache, options.CanSubstitute, options.SrcRef) - if err != nil || !reused || options.LayerIndex == nil { - return reused, info, err - } - - return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) -} - -// tryReusingBlobWithSrcRef is a wrapper around TryReusingBlob. -// If ref is provided, this function first tries to get layer from Additional Layer Store. -func (s *storageImageDestination) tryReusingBlobWithSrcRef(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool, ref reference.Named) (bool, types.BlobInfo, error) { - // lock the entire method as it executes fairly quickly - s.lock.Lock() - defer s.lock.Unlock() - - if ref != nil { - // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, ref.String()) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) - } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[blobinfo.Digest] = aLayer - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: aLayer.CompressedSize(), - MediaType: blobinfo.MediaType, - }, nil - } - } - - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} - type zstdFetcher struct { - stream internalTypes.ImageSourceSeekable - ctx context.Context - blobInfo types.BlobInfo + chunkAccessor private.BlobChunkAccessor + ctx context.Context + blobInfo types.BlobInfo } -// GetBlobAt converts from chunked.GetBlobAt to ImageSourceSeekable.GetBlobAt. +// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. 
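+// chunked.GetDiffer expects a fetcher with the chunked-package signature, so the
+// BlobChunkAccessor has to be wrapped before it can be handed over.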
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []internalTypes.ImageSourceChunk + var newChunks []private.ImageSourceChunk for _, v := range chunks { - i := internalTypes.ImageSourceChunk{ + i := private.ImageSourceChunk{ Offset: v.Offset, Length: v.Length, } newChunks = append(newChunks, i) } - rc, errs, err := f.stream.GetBlobAt(f.ctx, f.blobInfo, newChunks) - if _, ok := err.(internalTypes.BadPartialRequestError); ok { + rc, errs, err := f.chunkAccessor.GetBlobAt(f.ctx, f.blobInfo, newChunks) + if _, ok := err.(private.BadPartialRequestError); ok { err = chunked.ErrBadRequest{} } return rc, errs, err } -// PutBlobPartial attempts to create a blob using the data that is already present at the destination storage. stream is accessed -// in a non-sequential way to retrieve the missing chunks. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream internalTypes.ImageSourceSeekable, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { +// PutBlobPartial attempts to create a blob using the data that is already present +// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. +// It is available only if SupportsPutBlobPartial(). +// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller +// should fall back to PutBlobWithOptions. +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) { fetcher := zstdFetcher{ - stream: stream, - ctx: ctx, - blobInfo: srcInfo, + chunkAccessor: chunkAccessor, + ctx: ctx, + blobInfo: srcInfo, } differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) @@ -640,6 +609,22 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int return srcInfo, nil } +// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { + reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options) + if err != nil || !reused || options.LayerIndex == nil { + return reused, info, err + } + + return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) +} + // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -650,16 +635,36 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, stream int // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + return s.TryReusingBlobWithOptions(ctx, blobinfo, private.TryReusingBlobOptions{ + Cache: cache, + CanSubstitute: canSubstitute, + }) +} + +// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. +// The caller must arrange the blob to be eventually commited using s.commitLayer(). +func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() - return s.tryReusingBlobLocked(ctx, blobinfo, cache, canSubstitute) -} + if options.SrcRef != nil { + // Check if we have the layer in the underlying additional layer store. + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, types.BlobInfo{}, errors.Wrapf(err, `looking for compressed layers with digest %q and labels`, blobinfo.Digest) + } else if err == nil { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[blobinfo.Digest] = aLayer + return true, types.BlobInfo{ + Digest: blobinfo.Digest, + Size: aLayer.CompressedSize(), + MediaType: blobinfo.MediaType, + }, nil + } + } -// tryReusingBlobLocked implements a core functionality of TryReusingBlob. -// This must be called with a lock being held on storageImageDestination. -func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if blobinfo.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } @@ -708,9 +713,9 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob // Does the blob correspond to a known DiffID which we already have available? // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the - // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. - if canSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. 
+ if options.CanSubstitute || blobinfo.Size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `looking for layers with digest %q`, uncompressedDigest) @@ -720,8 +725,8 @@ func (s *storageImageDestination) tryReusingBlobLocked(ctx context.Context, blob s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, blobinfo, nil } - if !canSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) + if !options.CanSubstitute { + return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) } s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest return true, types.BlobInfo{ @@ -1261,6 +1266,11 @@ func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { return true // Yes, we want the unmodified manifest } +// SupportsPutBlobPartial returns true if PutBlobPartial is supported. +func (s *storageImageDestination) SupportsPutBlobPartial() bool { + return true +} + // PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { sizes := []int{} diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 2110a091d28..0bae8b2599d 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -12,7 +12,9 @@ import ( _ "github.com/containers/image/v5/oci/archive" _ "github.com/containers/image/v5/oci/layout" _ "github.com/containers/image/v5/openshift" + _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" + // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go "github.com/containers/image/v5/transports" diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index c98a6c6fda0..dcff8caf76b 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -561,6 +561,11 @@ type SystemContext struct { UserShortNameAliasConfPath string // If set, short-name resolution in pkg/shortnames must follow the specified mode ShortNameMode *ShortNameMode + // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and + // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce + // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this + // specific context. 
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool // If not "", overrides the default path for the authentication file, but only new format files AuthFilePath string // if not "", overrides the default path for the authentication file, but with the legacy format; diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index ffb2a4ce259..30d772c30d3 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 17 + VersionMinor = 20 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md new file mode 100644 index 00000000000..83b061c70b2 --- /dev/null +++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## The libtrust Project Community Code of Conduct + +The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md new file mode 100644 index 00000000000..fab2c41e89b --- /dev/null +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the libtrust Project + +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index d080d790c1a..726acc3aef3 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,8 +17,8 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-34" - PRIOR_FEDORA_NAME: "fedora-33" + FEDORA_NAME: "fedora-35" + PRIOR_FEDORA_NAME: "fedora-34" UBUNTU_NAME: "ubuntu-2104" # GCE project where images live diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index dbc1f7c9987..d7ca0c1c41f 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -51,6 +51,9 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*. 
containers-storage: $(sources) ## build using gc on the host $(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage +codespell: + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w + binary local-binary: containers-storage local-gccgo: ## build using gccgo on the host diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index bf50e910e62..5edffce6d57 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.37.0 +1.39.0 diff --git a/vendor/github.com/containers/storage/Vagrantfile b/vendor/github.com/containers/storage/Vagrantfile deleted file mode 100644 index c82c1f81bd3..00000000000 --- a/vendor/github.com/containers/storage/Vagrantfile +++ /dev/null @@ -1,25 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : -# -# The fedora/28-cloud-base and debian/jessie64 boxes are also available for -# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to -# "virtualbox" to use them instead. -# -Vagrant.configure("2") do |config| - config.vm.define "fedora" do |c| - c.vm.box = "fedora/28-cloud-base" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end - config.vm.define "debian" do |c| - c.vm.box = "debian/jessie64" - c.vm.synced_folder ".", "/vagrant", type: "rsync", - rsync__exclude: "bundles", rsync__args: ["-vadz", "--delete"] - c.vm.provision "shell", inline: <<-SHELL - sudo /vagrant/vagrant/provision.sh - SHELL - end -end diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index b4f773f2b73..a8b20f03a01 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -84,8 +84,17 @@ type ContainerStore interface { // SetNames updates the list of names associated with the container // with the specified ID. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the container with + // the specified id. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the container with + // the specified id. + RemoveNames(id string, names []string) error + // Get retrieves information about a container given an ID or name. Get(id string) (*Container, error) @@ -324,6 +333,12 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". 
You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) } } + if err := hasOverlappingRanges(options.UIDMap); err != nil { + return nil, err + } + if err := hasOverlappingRanges(options.GIDMap); err != nil { + return nil, err + } if err == nil { container = &Container{ ID: id, @@ -371,22 +386,40 @@ func (r *containerStore) removeName(container *Container, name string) { container.Names = stringSliceWithoutValue(container.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) - if container, ok := r.lookup(id); ok { - for _, name := range container.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherContainer, ok := r.byname[name]; ok { - r.removeName(otherContainer, name) - } - r.byname[name] = container + return r.updateNames(id, names, setNames) +} + +func (r *containerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *containerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *containerStore) updateNames(id string, names []string, op updateNameOperation) error { + container, ok := r.lookup(id) + if !ok { + return ErrContainerUnknown + } + oldNames := container.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) } - container.Names = names - return r.Save() + r.byname[name] = container } - return ErrContainerUnknown + container.Names = names + return r.Save() } func (r *containerStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index 63bfd2d136f..2db6764c91b 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -50,11 +50,14 @@ func chownByMapsMain() { if len(toHost.UIDs()) == 0 && len(toHost.GIDs()) == 0 { toHost = nil } + + chowner := newLChowner() + chown := func(path string, info os.FileInfo, _ error) error { if path == "." 
{ return nil } - return platformLChown(path, info, toHost, toContainer) + return chowner.LChown(path, info, toHost, toContainer) } if err := pwalk.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go index 0387adfc123..76823d532a7 100644 --- a/vendor/github.com/containers/storage/drivers/chown_unix.go +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package graphdriver @@ -6,17 +7,50 @@ import ( "errors" "fmt" "os" + "sync" "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type inode struct { + Dev uint64 + Ino uint64 +} + +type platformChowner struct { + mutex sync.Mutex + inodes map[inode]bool +} + +func newLChowner() *platformChowner { + return &platformChowner{ + inodes: make(map[inode]bool), + } +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { st, ok := info.Sys().(*syscall.Stat_t) if !ok { return nil } + + i := inode{ + Dev: uint64(st.Dev), + Ino: uint64(st.Ino), + } + c.mutex.Lock() + _, found := c.inodes[i] + if !found { + c.inodes[i] = true + } + c.mutex.Unlock() + + if found { + return nil + } + // Map an on-disk UID/GID pair from host to container // using the first map, then back to the host using the // second map. Skip that first step if they're 0, to diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go index 31bd5bb52dd..1845a4e086c 100644 --- a/vendor/github.com/containers/storage/drivers/chown_windows.go +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package graphdriver @@ -9,6 +10,13 @@ import ( "github.com/containers/storage/pkg/idtools" ) -func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { +type platformChowner struct { +} + +func newLChowner() *platformChowner { + return &platformChowner{} +} + +func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { return &os.PathError{"lchown", path, syscall.EWINDOWS} } diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index a534630df08..b7e681ace45 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -138,6 +138,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p if parent != "" { options := MountOpts{ MountLabel: mountLabel, + Options: []string{"ro"}, } parentFs, err = driver.Get(parent, options) if err != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 1efe7316d3e..a780ef5da3e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package overlay @@ -155,6 +156,15 @@ func hasMetacopyOption(opts []string) bool { return false } +func stripOption(opts []string, option string) []string { + for 
i, s := range opts { + if s == option { + return stripOption(append(opts[:i], opts[i+1:]...), option) + } + } + return opts +} + func hasVolatileOption(opts []string) bool { for _, s := range opts { if s == "volatile" { @@ -282,6 +292,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) backingFs = fsName } + runhome := filepath.Join(options.RunRoot, filepath.Base(home)) + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return nil, err + } + + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if opts.mountProgram == "" { + if supported, err := SupportsNativeOverlay(home, runhome); err != nil { + return nil, err + } else if !supported { + if path, err := exec.LookPath("fuse-overlayfs"); err == nil { + opts.mountProgram = path + } + } + } + if opts.mountProgram != "" { if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil { m := os.FileMode(0700) @@ -306,20 +341,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - - // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil { - return nil, err - } - runhome := filepath.Join(options.RunRoot, filepath.Base(home)) - if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil { - return nil, err - } - var usingMetacopy bool var supportsDType bool var supportsVolatile *bool @@ -559,14 +580,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) ( return err } -func SupportsNativeOverlay(graphroot, rundir string) (bool, error) { - if os.Geteuid() != 0 || graphroot == "" || rundir == "" { +func SupportsNativeOverlay(home, runhome string) (bool, error) { + if os.Geteuid() != 0 || home == "" || runhome == "" { return false, nil } - home := filepath.Join(graphroot, "overlay") - runhome := filepath.Join(rundir, "overlay") - var contents string flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home)) if err == nil { @@ -881,11 +899,18 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err != nil { return err } + + idPair := idtools.IDPair{ + UID: rootUID, + GID: rootGID, + } + // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(path.Join(d.home, linkDir), 0700, idPair); err != nil { return err } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + + if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0700, idPair); err != nil { return err } if parent != "" { @@ -896,14 +921,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable rootUID = int(st.UID()) rootGID = int(st.GID()) } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { - os.RemoveAll(dir) + if err2 := os.RemoveAll(dir); err2 != nil { + logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: 
%v", dir, err2) + } } }() @@ -1039,17 +1066,22 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { + p, _ := d.dir2(id) + return p +} + +func (d *Driver) dir2(id string) (string, bool) { newpath := path.Join(d.home, id) if _, err := os.Stat(newpath); err != nil { for _, p := range d.AdditionalImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l + return l, true } } } - return newpath + return newpath, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1145,6 +1177,9 @@ func (d *Driver) Remove(id string) error { // under each layer has a symlink created for it under the linkDir. If the symlink does not // exist, it creates them func (d *Driver) recreateSymlinks() error { + // We have at most 3 corrective actions per layer, so 10 iterations is plenty. + const maxIterations = 10 + // List all the directories under the home directory dirs, err := ioutil.ReadDir(d.home) if err != nil { @@ -1162,6 +1197,7 @@ func (d *Driver) recreateSymlinks() error { // Keep looping as long as we take some corrective action in each iteration var errs *multierror.Error madeProgress := true + iterations := 0 for madeProgress { errs = nil madeProgress = false @@ -1175,7 +1211,7 @@ func (d *Driver) recreateSymlinks() error { // Read the "link" file under each layer to get the name of the symlink data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link")) if err != nil { - errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir)) + errs = multierror.Append(errs, errors.Wrapf(err, "reading name of symlink for %q", dir.Name())) continue } linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) @@ -1212,7 +1248,12 @@ func (d *Driver) recreateSymlinks() error { if len(targetComponents) != 3 || targetComponents[0] != ".." || targetComponents[2] != "diff" { errs = multierror.Append(errs, errors.Errorf("link target of %q looks weird: %q", link, target)) // force the link to be recreated on the next pass - os.Remove(filepath.Join(linksDir, link.Name())) + if err := os.Remove(filepath.Join(linksDir, link.Name())); err != nil { + if !os.IsNotExist(err) { + errs = multierror.Append(errs, errors.Wrapf(err, "removing link %q", link)) + } // else don’t report any error, but also don’t set madeProgress. + continue + } madeProgress = true continue } @@ -1222,6 +1263,8 @@ func (d *Driver) recreateSymlinks() error { linkFile := filepath.Join(d.dir(targetID), "link") data, err := ioutil.ReadFile(linkFile) if err != nil || string(data) != link.Name() { + // NOTE: If two or more links point to the same target, we will update linkFile + // with every value of link.Name(), and set madeProgress = true every time. 
if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil { errs = multierror.Append(errs, errors.Wrapf(err, "correcting link for layer %s", targetID)) continue @@ -1229,6 +1272,11 @@ func (d *Driver) recreateSymlinks() error { madeProgress = true } } + iterations++ + if iterations >= maxIterations { + errs = multierror.Append(errs, fmt.Errorf("Reached %d iterations in overlay graph driver’s recreateSymlink, giving up", iterations)) + break + } } if errs != nil { return errs.ErrorOrNil() @@ -1244,16 +1292,20 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) - dir := d.dir(id) + dir, inAdditionalStore := d.dir2(id) if _, err := os.Stat(dir); err != nil { return "", err } - readWrite := true + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { disableShifting = true } + logLevel := logrus.WarnLevel + if unshare.IsRootless() { + logLevel = logrus.DebugLevel + } optsList := options.Options if len(optsList) == 0 { optsList = strings.Split(d.options.mountOptions, ",") @@ -1262,16 +1314,18 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // options otherwise the kernel refuses to follow the metacopy xattr. if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) { if d.usingMetacopy { + logrus.StandardLogger().Logf(logrus.DebugLevel, "Adding metacopy option, configured globally") optsList = append(optsList, "metacopy=on") - } else { - logLevel := logrus.WarnLevel - if unshare.IsRootless() { - logLevel = logrus.DebugLevel - } - logrus.StandardLogger().Logf(logLevel, "Ignoring metacopy option from storage.conf, not supported with booted kernel") } } } + if !d.usingMetacopy { + if hasMetacopyOption(optsList) { + logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel") + } + optsList = stripOption(optsList, "metacopy=on") + } + for _, o := range optsList { if o == "ro" { readWrite = false @@ -1416,41 +1470,37 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO workdir := path.Join(dir, "work") - var opts string - if readWrite { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) - } else { - opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) - } - if len(optsList) > 0 { - opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts) - } - if d.options.mountProgram == "" && unshare.IsRootless() { - opts = fmt.Sprintf("%s,userxattr", opts) + optsList = append(optsList, "userxattr") } - // If "volatile" is not supported by the file system, just ignore the request - if options.Volatile && !hasVolatileOption(strings.Split(opts, ",")) { + if options.Volatile && !hasVolatileOption(optsList) { supported, err := d.getSupportsVolatile() if err != nil { return "", err } + // If "volatile" is not supported by the file system, just ignore the request if supported { - opts = fmt.Sprintf("%s,volatile", opts) + optsList = append(optsList, "volatile") } } + var opts string + if readWrite { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir) + } else { + opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":")) + } + if len(optsList) > 0 { + opts = 
fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) + } + mountData := label.FormatMountLabel(opts, options.MountLabel) mountFunc := unix.Mount mountTarget := mergedDir pageSize := unix.Getpagesize() - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. if d.options.mountProgram != "" { mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { if !disableShifting { @@ -1476,18 +1526,26 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } return nil } - } else if len(mountData) > pageSize { + } else if len(mountData) >= pageSize { + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. + workdir = path.Join(id, "work") //FIXME: We need to figure out to get this to work with additional stores if readWrite { diffDir := path.Join(id, "diff") opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir) } else { - opts = fmt.Sprintf("lowerdir=%s", strings.Join(absLowers, ":")) + opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":")) + } + if len(optsList) > 0 { + opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ",")) } mountData = label.FormatMountLabel(opts, options.MountLabel) - if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label %q too large %d > page size %d", options.MountLabel, len(mountData), pageSize) + if len(mountData) >= pageSize { + return "", fmt.Errorf("cannot mount layer, mount label %q too large %d >= page size %d", options.MountLabel, len(mountData), pageSize) } mountFunc = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go index 5fc810b89d4..de6e377541c 100644 --- a/vendor/github.com/containers/storage/errors.go +++ b/vendor/github.com/containers/storage/errors.go @@ -1,6 +1,8 @@ package storage import ( + "errors" + "github.com/containers/storage/types" ) @@ -55,4 +57,9 @@ var ( ErrStoreIsReadOnly = types.ErrStoreIsReadOnly // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = types.ErrNotSupported + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = types.ErrInvalidMappings + // ErrInvalidNameOperation is returned when updateName is called with invalid operation. 
+ // Internal error + errInvalidUpdateNameOperation = errors.New("invalid update name operation") ) diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index f870b9ceed3..0a06a43235f 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -1,6 +1,9 @@ package storage import ( + "fmt" + "strings" + "github.com/containers/storage/pkg/idtools" "github.com/google/go-intervals/intervalset" "github.com/pkg/errors" @@ -218,3 +221,45 @@ func maxInt(a, b int) int { } return a } + +func hasOverlappingRanges(mappings []idtools.IDMap) error { + hostIntervals := intervalset.Empty() + containerIntervals := intervalset.Empty() + + var conflicts []string + + for _, m := range mappings { + c := interval{start: m.ContainerID, end: m.ContainerID + m.Size} + h := interval{start: m.HostID, end: m.HostID + m.Size} + + added := false + overlaps := false + + containerIntervals.IntervalsBetween(c, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + added = true + } + containerIntervals.Add(intervalset.NewSet([]intervalset.Interval{c})) + + hostIntervals.IntervalsBetween(h, func(x intervalset.Interval) bool { + overlaps = true + return false + }) + if overlaps && !added { + conflicts = append(conflicts, fmt.Sprintf("%v:%v:%v", m.ContainerID, m.HostID, m.Size)) + } + hostIntervals.Add(intervalset.NewSet([]intervalset.Interval{h})) + } + + if conflicts != nil { + if len(conflicts) == 1 { + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mapping %s conflicts with other mappings", conflicts[0]) + } + return errors.Wrapf(ErrInvalidMappings, "the specified UID and/or GID mappings %s conflict with other mappings", strings.Join(conflicts, ", ")) + } + return nil +} diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index bca25a65b8c..a4c3ed22c75 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -136,8 +136,19 @@ type ImageStore interface { // SetNames replaces the list of names associated with an image with the // supplied values. The values are expected to be valid normalized // named image references. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + AddNames(id string, names []string) error + + // RemoveNames removes the supplied values from the list of names associated with the image with + // the specified id. The values are expected to be valid normalized + // named image references. + RemoveNames(id string, names []string) error + // Delete removes the record of the image. 
Delete(id string) error @@ -425,37 +436,36 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c if created.IsZero() { created = time.Now().UTC() } - if err == nil { - image = &Image{ - ID: id, - Digest: searchableDigest, - Digests: nil, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: created, - Flags: make(map[string]interface{}), - } - err := image.recomputeDigests() - if err != nil { - return nil, errors.Wrapf(err, "error validating digests for new image") - } - r.images = append(r.images, image) - r.idindex.Add(id) - r.byid[id] = image - for _, name := range names { - r.byname[name] = image - } - for _, digest := range image.Digests { - list := r.bydigest[digest] - r.bydigest[digest] = append(list, image) - } - err = r.Save() - image = copyImage(image) + + image = &Image{ + ID: id, + Digest: searchableDigest, + Digests: nil, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + BigDataDigests: make(map[string]digest.Digest), + Created: created, + Flags: make(map[string]interface{}), + } + err = image.recomputeDigests() + if err != nil { + return nil, errors.Wrapf(err, "error validating digests for new image") + } + r.images = append(r.images, image) + r.idindex.Add(id) + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + for _, digest := range image.Digests { + list := r.bydigest[digest] + r.bydigest[digest] = append(list, image) } + err = r.Save() + image = copyImage(image) return image, err } @@ -506,26 +516,44 @@ func (i *Image) addNameToHistory(name string) { i.NamesHistory = dedupeNames(append([]string{name}, i.NamesHistory...)) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
func (r *imageStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *imageStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *imageStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) } - names = dedupeNames(names) - if image, ok := r.lookup(id); ok { - for _, name := range image.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherImage, ok := r.byname[name]; ok { - r.removeName(otherImage, name) - } - r.byname[name] = image - image.addNameToHistory(name) + image, ok := r.lookup(id) + if !ok { + return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + oldNames := image.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) } - image.Names = names - return r.Save() + r.byname[name] = image + image.addNameToHistory(name) } - return errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + image.Names = names + return r.Save() } func (r *imageStore) Delete(id string) error { diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index fbf6ad3621f..8a5616dfcb6 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -23,6 +23,7 @@ import ( "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/tarlog" "github.com/containers/storage/pkg/truncindex" + multierror "github.com/hashicorp/go-multierror" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux/label" @@ -220,8 +221,17 @@ type LayerStore interface { // SetNames replaces the list of names associated with a layer with the // supplied values. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the supplied values to the list of names associated with the layer with the + // specified id. + AddNames(id string, names []string) error + + // RemoveNames remove the supplied values from the list of names associated with the layer with the + // specified id. + RemoveNames(id string, names []string) error + // Delete deletes a layer with the specified name or ID. 
Delete(id string) error @@ -398,14 +408,13 @@ func (r *layerStore) Load() error { if layer.Flags == nil { layer.Flags = make(map[string]interface{}) } - if cleanup, ok := layer.Flags[incompleteFlag]; ok { - if b, ok := cleanup.(bool); ok && b { - err = r.deleteInternal(layer.ID) - if err != nil { - break - } - shouldSave = true + if layerHasIncompleteFlag(layer) { + logrus.Warnf("Found incomplete layer %#v, deleting it", layer.ID) + err = r.deleteInternal(layer.ID) + if err != nil { + break } + shouldSave = true } } } @@ -741,26 +750,17 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab } if moreOptions.TemplateLayer != "" { if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) - } - return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer) + return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id) } oldMappings = templateIDMappings } else { if writeable { if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating read-write layer") + return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id) } } else { if err = r.driver.Create(id, parent, &opts); err != nil { - if id != "" { - return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) - } - return nil, -1, errors.Wrapf(err, "error creating layer") + return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id) } } oldMappings = parentMappings @@ -769,7 +769,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil { // We don't have a record of this layer, but at least // try to clean it up underneath us. - r.driver.Remove(id) + if err2 := r.driver.Remove(id); err2 != nil { + logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2) + } return nil, -1, err } } @@ -794,21 +796,26 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab for flag, value := range flags { layer.Flags[flag] = value } + savedIncompleteLayer := false if diff != nil { layer.Flags[incompleteFlag] = true err = r.Save() if err != nil { // We don't have a record of this layer, but at least // try to clean it up underneath us. - r.driver.Remove(id) + if err2 := r.driver.Remove(id); err2 != nil { + logrus.Errorf("While recovering from a failure saving incomplete layer metadata, error deleting layer %#v: %v", id, err2) + } return nil, -1, err } + savedIncompleteLayer = true size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff) if err != nil { - if r.Delete(layer.ID) != nil { + if err2 := r.Delete(layer.ID); err2 != nil { // Either a driver error or an error saving. // We now have a layer that's been marked for // deletion but which we failed to remove. 
+ logrus.Errorf("While recovering from a failure applying layer diff, error deleting layer %#v: %v", layer.ID, err2) } return nil, -1, err } @@ -816,9 +823,20 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab } err = r.Save() if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) + if savedIncompleteLayer { + if err2 := r.Delete(layer.ID); err2 != nil { + // Either a driver error or an error saving. + // We now have a layer that's been marked for + // deletion but which we failed to remove. + logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v: %v", layer.ID, err2) + } + } else { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + if err2 := r.driver.Remove(id); err2 != nil { + logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v in graph driver: %v", id, err2) + } + } return nil, -1, err } layer = copyLayer(layer) @@ -1031,25 +1049,43 @@ func (r *layerStore) removeName(layer *Layer, name string) { layer.Names = stringSliceWithoutValue(layer.Names, name) } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. func (r *layerStore) SetNames(id string, names []string) error { + return r.updateNames(id, names, setNames) +} + +func (r *layerStore) AddNames(id string, names []string) error { + return r.updateNames(id, names, addNames) +} + +func (r *layerStore) RemoveNames(id string, names []string) error { + return r.updateNames(id, names, removeNames) +} + +func (r *layerStore) updateNames(id string, names []string, op updateNameOperation) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) } - names = dedupeNames(names) - if layer, ok := r.lookup(id); ok { - for _, name := range layer.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherLayer, ok := r.byname[name]; ok { - r.removeName(otherLayer, name) - } - r.byname[name] = layer + layer, ok := r.lookup(id) + if !ok { + return ErrLayerUnknown + } + oldNames := layer.Names + names, err := applyNameOperation(oldNames, names, op) + if err != nil { + return err + } + for _, name := range oldNames { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) } - layer.Names = names - return r.Save() + r.byname[name] = layer } - return ErrLayerUnknown + layer.Names = names + return r.Save() } func (r *layerStore) datadir(id string) string { @@ -1148,6 +1184,17 @@ func (r *layerStore) tspath(id string) string { return filepath.Join(r.layerdir, id+tarSplitSuffix) } +// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true +func layerHasIncompleteFlag(layer *Layer) bool { + // layer.Flags[…] is defined to succeed and return ok == false if Flags == nil + if flagValue, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := flagValue.(bool); ok && b { + return true + } + } + return false +} + func (r *layerStore) deleteInternal(id string) error { if !r.IsReadWrite() { return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) @@ -1156,6 +1203,18 @@ func (r *layerStore) deleteInternal(id string) error { if !ok { return ErrLayerUnknown } + // Ensure that if we are interrupted, the layer will be 
cleaned up. + if !layerHasIncompleteFlag(layer) { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + layer.Flags[incompleteFlag] = true + if err := r.Save(); err != nil { + return err + } + } + // We never unset incompleteFlag; below, we remove the entire object from r.layers. + id = layer.ID err := r.driver.Remove(id) if err != nil { @@ -1463,34 +1522,48 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, } return maybeCompressReadCloser(diff) } - defer tsfile.Close() decompressor, err := pgzip.NewReader(tsfile) if err != nil { - return nil, err - } - defer decompressor.Close() - - tsbytes, err := ioutil.ReadAll(decompressor) - if err != nil { + if e := tsfile.Close(); e != nil { + logrus.Debug(e) + } return nil, err } - metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) + metadata = storage.NewJSONUnpacker(decompressor) fgetter, err := r.newFileGetter(to) if err != nil { - return nil, err + errs := multierror.Append(nil, errors.Wrapf(err, "creating file-getter")) + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + return nil, errs.ErrorOrNil() } tarstream := asm.NewOutputTarStream(fgetter, metadata) rc := ioutils.NewReadCloserWrapper(tarstream, func() error { - err1 := tarstream.Close() - err2 := fgetter.Close() - if err2 == nil { - return err1 + var errs *multierror.Error + if err := decompressor.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing decompressor")) + } + if err := tsfile.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing tarstream headers")) + } + if err := tarstream.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing reconstructed tarstream")) + } + if err := fgetter.Close(); err != nil { + errs = multierror.Append(errs, errors.Wrapf(err, "closing file-getter")) + } + if errs != nil { + return errs.ErrorOrNil() } - return err2 + return nil }) return maybeCompressReadCloser(rc) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 76544ff289b..677a15edd29 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -77,6 +77,10 @@ const ( containersOverrideXattr = "user.containers.override_stat" ) +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, +} + // Archiver allows the reuse of most utility functions of this package with a // pluggable Untar function. 
To facilitate the passing of specific id mappings // for untar, an archiver can be created with maps which will then be passed to @@ -507,6 +511,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } } + if fi.Mode()&os.ModeSocket != 0 { + logrus.Warnf("archive: skipping %q since it is a socket", path) + return nil + } hdr, err := FileInfoHeader(name, fi, link) if err != nil { @@ -743,6 +751,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L var errs []string for key, value := range hdr.Xattrs { + if _, found := xattrsToIgnore[key]; found { + continue + } if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { if errors.Is(err, syscall.ENOTSUP) || (inUserns && errors.Is(err, syscall.EPERM)) { // We ignore errors here because not all graphdrivers support @@ -962,7 +973,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) buffer := make([]byte, 1<<20) + doChown := !options.NoLchown if options.ForceMask != nil { + // if ForceMask is in place, make sure lchown is disabled. + doChown = false uid, gid, mode, err := GetFileOwner(dest) if err == nil { value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) @@ -1067,7 +1081,7 @@ loop: chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { + if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go new file mode 100644 index 00000000000..b8b278a1329 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -0,0 +1,627 @@ +package chunked + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + storage "github.com/containers/storage" + "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/ioutils" + jsoniter "github.com/json-iterator/go" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + cacheKey = "chunked-manifest-cache" + cacheVersion = 1 +) + +type metadata struct { + tagLen int + digestLen int + tags []byte + vdata []byte +} + +type layer struct { + id string + metadata *metadata + target string +} + +type layersCache struct { + layers []layer + refs int + store storage.Store + mutex sync.RWMutex + created time.Time +} + +var cacheMutex sync.Mutex +var cache *layersCache + +func (c *layersCache) release() { + cacheMutex.Lock() + defer cacheMutex.Unlock() + + c.refs-- + if c.refs == 0 { + cache = nil + } +} + +func getLayersCacheRef(store storage.Store) *layersCache { + cacheMutex.Lock() + defer cacheMutex.Unlock() + if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 { + cache.refs++ + return cache + } + cache := &layersCache{ + store: store, + refs: 1, + created: time.Now(), + } + return cache +} + +func getLayersCache(store storage.Store) (*layersCache, error) { + c := getLayersCacheRef(store) + + if err := c.load(); err != nil { + c.release() + return nil, err + } + return c, nil +} + +func (c 
*layersCache) load() error { + c.mutex.Lock() + defer c.mutex.Unlock() + + allLayers, err := c.store.Layers() + if err != nil { + return err + } + existingLayers := make(map[string]string) + for _, r := range c.layers { + existingLayers[r.id] = r.target + } + + currentLayers := make(map[string]string) + for _, r := range allLayers { + currentLayers[r.ID] = r.ID + if _, found := existingLayers[r.ID]; found { + continue + } + + bigData, err := c.store.LayerBigData(r.ID, cacheKey) + // if the cache areadly exists, read and use it + if err == nil { + defer bigData.Close() + metadata, err := readMetadataFromCache(bigData) + if err == nil { + c.addLayer(r.ID, metadata) + continue + } + logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) + } else if errors.Cause(err) != os.ErrNotExist { + return err + } + + // otherwise create it from the layer TOC. + manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + if err != nil { + continue + } + defer manifestReader.Close() + + manifest, err := ioutil.ReadAll(manifestReader) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + + metadata, err := writeCache(manifest, r.ID, c.store) + if err == nil { + c.addLayer(r.ID, metadata) + } + } + + var newLayers []layer + for _, l := range c.layers { + if _, found := currentLayers[l.id]; found { + newLayers = append(newLayers, l) + } + } + c.layers = newLayers + + return nil +} + +// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file +// is usable for deduplication with hardlinks. +// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. +func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { + digester := digest.Canonical.Digester() + + modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) + hash := digester.Hash() + + if _, err := hash.Write([]byte(f.Digest)); err != nil { + return "", err + } + + if _, err := hash.Write([]byte(modeString)); err != nil { + return "", err + } + + if len(f.Xattrs) > 0 { + keys := make([]string, 0, len(f.Xattrs)) + for k := range f.Xattrs { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + if _, err := hash.Write([]byte(k)); err != nil { + return "", err + } + if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil { + return "", err + } + } + } + return string(digester.Digest()), nil +} + +// generateFileLocation generates a file location in the form $OFFSET@$PATH +func generateFileLocation(path string, offset uint64) []byte { + return []byte(fmt.Sprintf("%d@%s", offset, path)) +} + +// generateTag generates a tag in the form $DIGEST$OFFSET@LEN. +// the [OFFSET; LEN] points to the variable length data where the file locations +// are stored. $DIGEST has length digestLen stored in the metadata file header. +func generateTag(digest string, offset, len uint64) string { + return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) +} + +type setBigData interface { + // SetLayerBigData stores a (possibly large) chunk of named data + SetLayerBigData(id, key string, data io.Reader) error +} + +// writeCache write a cache for the layer ID. +// It generates a sorted list of digests with their offset to the path location and offset. +// The same cache is used to lookup files, chunks and candidates for deduplication with hard links. 
+// There are 3 kind of digests stored: +// - digest(file.payload)) +// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) +// - digest(i) for each i in chunks(file payload) +func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) { + var vdata bytes.Buffer + tagLen := 0 + digestLen := 0 + var tagsBuffer bytes.Buffer + + toc, err := prepareMetadata(manifest) + if err != nil { + return nil, err + } + + var tags []string + for _, k := range toc { + if k.Digest != "" { + location := generateFileLocation(k.Name, 0) + + off := uint64(vdata.Len()) + l := uint64(len(location)) + + d := generateTag(k.Digest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + fp, err := calculateHardLinkFingerprint(k) + if err != nil { + return nil, err + } + d = generateTag(fp, off, l) + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + + digestLen = len(k.Digest) + } + if k.ChunkDigest != "" { + location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + off := uint64(vdata.Len()) + l := uint64(len(location)) + d := generateTag(k.ChunkDigest, off, l) + if tagLen == 0 { + tagLen = len(d) + } + if tagLen != len(d) { + return nil, errors.New("digest with different length found") + } + tags = append(tags, d) + + if _, err := vdata.Write(location); err != nil { + return nil, err + } + digestLen = len(k.ChunkDigest) + } + } + + sort.Strings(tags) + + for _, t := range tags { + if _, err := tagsBuffer.Write([]byte(t)); err != nil { + return nil, err + } + } + + pipeReader, pipeWriter := io.Pipe() + errChan := make(chan error, 1) + go func() { + defer pipeWriter.Close() + defer close(errChan) + + // version + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { + errChan <- err + return + } + + // len of a tag + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { + errChan <- err + return + } + + // len of a digest + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { + errChan <- err + return + } + + // tags length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + errChan <- err + return + } + + // vdata length + if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { + errChan <- err + return + } + + // tags + if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { + errChan <- err + return + } + + // variable length data + if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { + errChan <- err + return + } + + errChan <- nil + }() + defer pipeReader.Close() + + counter := ioutils.NewWriteCounter(ioutil.Discard) + + r := io.TeeReader(pipeReader, counter) + + if err := dest.SetLayerBigData(id, cacheKey, r); err != nil { + return nil, err + } + + if err := <-errChan; err != nil { + return nil, err + } + + logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) + + return &metadata{ + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + }, nil +} + +func readMetadataFromCache(bigData io.Reader) (*metadata, error) { + var version, tagLen, digestLen, tagsLen, vdataLen uint64 + if err := binary.Read(bigData, binary.LittleEndian, &version); 
err != nil { + return nil, err + } + if version != cacheVersion { + return nil, nil + } + if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil { + return nil, err + } + + tags := make([]byte, tagsLen) + if _, err := bigData.Read(tags); err != nil { + return nil, err + } + + vdata := make([]byte, vdataLen) + if _, err := bigData.Read(vdata); err != nil { + return nil, err + } + + return &metadata{ + tagLen: int(tagLen), + digestLen: int(digestLen), + tags: tags, + vdata: vdata, + }, nil +} + +func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { + toc, err := unmarshalToc(manifest) + if err != nil { + // ignore errors here. They might be caused by a different manifest format. + return nil, nil + } + + var r []*internal.FileMetadata + chunkSeen := make(map[string]bool) + for i := range toc.Entries { + d := toc.Entries[i].Digest + if d != "" { + r = append(r, &toc.Entries[i]) + continue + } + + // chunks do not use hard link dedup so keeping just one candidate is enough + cd := toc.Entries[i].ChunkDigest + if cd != "" && !chunkSeen[cd] { + r = append(r, &toc.Entries[i]) + chunkSeen[cd] = true + } + } + return r, nil +} + +func (c *layersCache) addLayer(id string, metadata *metadata) error { + target, err := c.store.DifferTarget(id) + if err != nil { + return fmt.Errorf("get checkout directory layer %q: %w", id, err) + } + + l := layer{ + id: id, + metadata: metadata, + target: target, + } + c.layers = append(c.layers, l) + return nil +} + +func byteSliceAsString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func findTag(digest string, metadata *metadata) (string, uint64, uint64) { + if len(digest) != metadata.digestLen { + return "", 0, 0 + } + + nElements := len(metadata.tags) / metadata.tagLen + + i := sort.Search(nElements, func(i int) bool { + d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + return strings.Compare(d, digest) >= 0 + }) + if i < nElements { + d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + if digest == d { + startOff := i*metadata.tagLen + metadata.digestLen + parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) + return digest, uint64(off), uint64(len) + } + } + return "", 0, 0 +} + +func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { + if digest == "" { + return "", "", -1, nil + } + + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, layer := range c.layers { + digest, off, len := findTag(digest, layer.metadata) + if digest != "" { + position := string(layer.metadata.vdata[off : off+len]) + parts := strings.SplitN(position, "@", 2) + offFile, _ := strconv.ParseInt(parts[0], 10, 64) + return layer.target, parts[1], offFile, nil + } + } + + return "", "", -1, nil +} + +// findFileInOtherLayers finds the specified file in other layers. +// file is the file to look for. 
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { + digest := file.Digest + if useHardLinks { + var err error + digest, err = calculateHardLinkFingerprint(file) + if err != nil { + return "", "", err + } + } + target, name, off, err := c.findDigestInternal(digest) + if off == 0 { + return target, name, err + } + return "", "", nil +} + +func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) { + return c.findDigestInternal(chunk.ChunkDigest) +} + +func unmarshalToc(manifest []byte) (*internal.TOC, error) { + var buf bytes.Buffer + count := 0 + var toc internal.TOC + + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type", "name", "linkName", "digest", "chunkDigest", "chunkType": + count += len(iter.ReadStringAsSlice()) + case "xattrs": + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + count += len(iter.ReadStringAsSlice()) + } + default: + iter.Skip() + } + } + } + break + } + + buf.Grow(count) + + getString := func(b []byte) string { + from := buf.Len() + buf.Write(b) + to := buf.Len() + return byteSliceAsString(buf.Bytes()[from:to]) + } + + iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + if field == "version" { + toc.Version = iter.ReadInt() + continue + } + if field != "entries" { + iter.Skip() + continue + } + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch field { + case "type": + m.Type = getString(iter.ReadStringAsSlice()) + case "name": + m.Name = getString(iter.ReadStringAsSlice()) + case "linkName": + m.Linkname = getString(iter.ReadStringAsSlice()) + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "UID": + m.UID = iter.ReadInt() + case "GID": + m.GID = iter.ReadInt() + case "ModTime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devMajor": + m.Devmajor = iter.ReadInt64() + case "devMinor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = getString(iter.ReadStringAsSlice()) + case "offset": + m.Offset = iter.ReadInt64() + case "endOffset": + m.EndOffset = iter.ReadInt64() + case "chunkSize": + m.ChunkSize = iter.ReadInt64() + case "chunkOffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkDigest": + m.ChunkDigest = getString(iter.ReadStringAsSlice()) + case "chunkType": + m.ChunkType = getString(iter.ReadStringAsSlice()) + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + value := iter.ReadStringAsSlice() + m.Xattrs[key] = getString(value) + } + default: + iter.Skip() + } + } + toc.Entries = append(toc.Entries, m) + } + break + } + 
toc.StringsBuf = buf + return &toc, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index 092cf584af3..aeb7cfd4f01 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -5,6 +5,7 @@ package compressor // larger software like the graph drivers. import ( + "bufio" "encoding/base64" "io" "io/ioutil" @@ -15,6 +16,189 @@ import ( "github.com/vbatts/tar-split/archive/tar" ) +const RollsumBits = 16 +const holesThreshold = int64(1 << 10) + +type holesFinder struct { + reader *bufio.Reader + fileOff int64 + zeros int64 + from int64 + threshold int64 + + state int +} + +const ( + holesFinderStateRead = iota + holesFinderStateAccumulate + holesFinderStateFound + holesFinderStateEOF +) + +// ReadByte reads a single byte from the underlying reader. +// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil). +// If there are at least f.THRESHOLD consecutive zeros, then the +// return value is (N_CONSECUTIVE_ZEROS, '\x00'). +func (f *holesFinder) ReadByte() (int64, byte, error) { + for { + switch f.state { + // reading the file stream + case holesFinderStateRead: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + b, err := f.reader.ReadByte() + if err != nil { + return 0, b, err + } + + if b != 0 { + return 0, b, err + } + + f.zeros = 1 + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } else { + f.state = holesFinderStateAccumulate + } + // accumulating zeros, but still didn't reach the threshold + case holesFinderStateAccumulate: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + continue + } + return 0, b, err + } + + if b == 0 { + f.zeros++ + if f.zeros == f.threshold { + f.state = holesFinderStateFound + } + } else { + if f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + } + // found a hole. Number of zeros >= threshold + case holesFinderStateFound: + b, err := f.reader.ReadByte() + if err != nil { + if err == io.EOF { + f.state = holesFinderStateEOF + } + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + if b != 0 { + if f.reader.UnreadByte(); err != nil { + return 0, 0, err + } + f.state = holesFinderStateRead + + holeLen := f.zeros + f.zeros = 0 + return holeLen, 0, nil + } + f.zeros++ + // reached EOF. Flush pending zeros if any. + case holesFinderStateEOF: + if f.zeros > 0 { + f.zeros-- + return 0, 0, nil + } + return 0, 0, io.EOF + } + } +} + +type rollingChecksumReader struct { + reader *holesFinder + closed bool + rollsum *RollSum + pendingHole int64 + + // WrittenOut is the total number of bytes read from + // the stream. + WrittenOut int64 + + // IsLastChunkZeros tells whether the last generated + // chunk is a hole (made of consecutive zeros). If it + // is false, then the last chunk is a data chunk + // generated by the rolling checksum. 
+ IsLastChunkZeros bool +} + +func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { + rc.IsLastChunkZeros = false + + if rc.pendingHole > 0 { + toCopy := int64(len(b)) + if rc.pendingHole < toCopy { + toCopy = rc.pendingHole + } + rc.pendingHole -= toCopy + for i := int64(0); i < toCopy; i++ { + b[i] = 0 + } + + rc.WrittenOut += toCopy + + rc.IsLastChunkZeros = true + + // if there are no other zeros left, terminate the chunk + return rc.pendingHole == 0, int(toCopy), nil + } + + if rc.closed { + return false, 0, io.EOF + } + + for i := 0; i < len(b); i++ { + holeLen, n, err := rc.reader.ReadByte() + if err != nil { + if err == io.EOF { + rc.closed = true + if i == 0 { + return false, 0, err + } + return false, i, nil + } + // Report any other error type + return false, -1, err + } + if holeLen > 0 { + for j := int64(0); j < holeLen; j++ { + rc.rollsum.Roll(0) + } + rc.pendingHole = holeLen + return true, i, nil + } + b[i] = n + rc.WrittenOut++ + rc.rollsum.Roll(n) + if rc.rollsum.OnSplitWithBits(RollsumBits) { + return true, i + 1, nil + } + } + return false, len(b), nil +} + +type chunk struct { + ChunkOffset int64 + Offset int64 + Checksum string + ChunkSize int64 + ChunkType string +} + func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error { // total written so far. Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) @@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r if _, err := zstdWriter.Write(rawBytes); err != nil { return err } - payloadDigester := digest.Canonical.Digester() - payloadChecksum := payloadDigester.Hash() - payloadDest := io.MultiWriter(payloadChecksum, zstdWriter) + payloadDigester := digest.Canonical.Digester() + chunkDigester := digest.Canonical.Digester() // Now handle the payload, if any - var startOffset, endOffset int64 + startOffset := int64(0) + lastOffset := int64(0) + lastChunkOffset := int64(0) + checksum := "" + + chunks := []chunk{} + + hf := &holesFinder{ + threshold: holesThreshold, + reader: bufio.NewReader(tr), + } + + rcReader := &rollingChecksumReader{ + reader: hf, + rollsum: NewRollSum(), + } + + payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) for { - read, errRead := tr.Read(buf) + mustSplit, read, errRead := rcReader.Read(buf) if errRead != nil && errRead != io.EOF { return err } - - // restart the compression only if there is - // a payload. + // restart the compression only if there is a payload. 
if read > 0 { if startOffset == 0 { startOffset, err = restartCompression() if err != nil { return err } + lastOffset = startOffset + } + + if _, err := payloadDest.Write(buf[:read]); err != nil { + return err } - _, err := payloadDest.Write(buf[:read]) + } + if (mustSplit || errRead == io.EOF) && startOffset > 0 { + off, err := restartCompression() if err != nil { return err } + + chunkSize := rcReader.WrittenOut - lastChunkOffset + if chunkSize > 0 { + chunkType := internal.ChunkTypeData + if rcReader.IsLastChunkZeros { + chunkType = internal.ChunkTypeZeros + } + + chunks = append(chunks, chunk{ + ChunkOffset: lastChunkOffset, + Offset: lastOffset, + Checksum: chunkDigester.Digest().String(), + ChunkSize: chunkSize, + ChunkType: chunkType, + }) + } + + lastOffset = off + lastChunkOffset = rcReader.WrittenOut + chunkDigester = digest.Canonical.Digester() + payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) } if errRead == io.EOF { if startOffset > 0 { - endOffset, err = restartCompression() - if err != nil { - return err - } checksum = payloadDigester.Digest().String() } break @@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r for k, v := range hdr.Xattrs { xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) } - m := internal.FileMetadata{ - Type: typ, - Name: hdr.Name, - Linkname: hdr.Linkname, - Mode: hdr.Mode, - Size: hdr.Size, - UID: hdr.Uid, - GID: hdr.Gid, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - Devmajor: hdr.Devmajor, - Devminor: hdr.Devminor, - Xattrs: xattrs, - Digest: checksum, - Offset: startOffset, - EndOffset: endOffset, - - // ChunkSize is 0 for the last chunk - ChunkSize: 0, - ChunkOffset: 0, - ChunkDigest: checksum, - } - metadata = append(metadata, m) + entries := []internal.FileMetadata{ + { + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: &hdr.ModTime, + AccessTime: &hdr.AccessTime, + ChangeTime: &hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Digest: checksum, + Offset: startOffset, + EndOffset: lastOffset, + }, + } + for i := 1; i < len(chunks); i++ { + entries = append(entries, internal.FileMetadata{ + Type: internal.TypeChunk, + Name: hdr.Name, + ChunkOffset: chunks[i].ChunkOffset, + }) + } + if len(chunks) > 1 { + for i := range chunks { + entries[i].ChunkSize = chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) } rawBytes := tr.RawBytes() @@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { if level == nil { - l := 3 + l := 10 level = &l } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 00000000000..f4dfad822e9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const windowSize = 64 // Roll assumes windowSize is a power of 2 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. 
+func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index c91c43d85cd..3bb5286d92d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,11 +8,11 @@ import ( "archive/tar" "bytes" "encoding/binary" - "encoding/json" "fmt" "io" "time" + jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" ) @@ -20,6 +20,9 @@ import ( type TOC struct { Version int `json:"version"` Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` } type FileMetadata struct { @@ -27,25 +30,33 @@ type FileMetadata struct { Name string `json:"name"` Linkname string `json:"linkName,omitempty"` Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size"` - UID int `json:"uid"` - GID int `json:"gid"` - ModTime time.Time `json:"modtime"` - AccessTime time.Time `json:"accesstime"` - ChangeTime time.Time `json:"changetime"` - Devmajor int64 `json:"devMajor"` - Devminor int64 `json:"devMinor"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` Xattrs map[string]string `json:"xattrs,omitempty"` Digest string `json:"digest,omitempty"` Offset int64 `json:"offset,omitempty"` EndOffset int64 `json:"endOffset,omitempty"` - // Currently chunking is not supported. ChunkSize int64 `json:"chunkSize,omitempty"` ChunkOffset int64 `json:"chunkOffset,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. 
+ Chunks []*FileMetadata `json:"-"` } +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + const ( TypeReg = "reg" TypeChunk = "chunk" @@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off Entries: metadata, } + var json = jsoniter.ConfigCompatibleWithStandardLibrary // Generate the manifest manifest, err := json.Marshal(toc) if err != nil { diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 6efc6a4c845..7de20feaaa0 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -4,8 +4,8 @@ import ( archivetar "archive/tar" "context" "encoding/base64" - "encoding/json" "fmt" + "hash" "io" "io/ioutil" "os" @@ -13,6 +13,8 @@ import ( "reflect" "sort" "strings" + "sync" + "sync/atomic" "syscall" "time" @@ -25,6 +27,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" + securejoin "github.com/cyphar/filepath-securejoin" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -41,24 +44,35 @@ const ( bigDataKey = "zstd-chunked-manifest" fileTypeZstdChunked = iota - fileTypeEstargz = iota + fileTypeEstargz + fileTypeNoCompression + fileTypeHole + + copyGoRoutines = 32 ) type compressedFileType int type chunkedDiffer struct { - stream ImageSourceSeekable - manifest []byte - layersMetadata map[string][]internal.FileMetadata - layersTarget map[string]string - tocOffset int64 - fileType compressedFileType + stream ImageSourceSeekable + manifest []byte + layersCache *layersCache + tocOffset int64 + fileType compressedFileType + + copyBuffer []byte gzipReader *pgzip.Reader + zstdReader *zstd.Decoder + rawReader io.Reader +} + +var xattrsToIgnore = map[string]interface{}{ + "security.selinux": true, } -func timeToTimespec(time time.Time) (ts unix.Timespec) { - if time.IsZero() { +func timeToTimespec(time *time.Time) (ts unix.Timespec) { + if time == nil || time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 ts.Nsec = ((1 << 30) - 2) @@ -67,11 +81,29 @@ func timeToTimespec(time time.Time) (ts unix.Timespec) { return unix.NsecToTimespec(time.UnixNano()) } +func doHardLink(srcFd int, destDirFd int, destBase string) error { + doLink := func() error { + // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses + // /proc/self/fd doesn't and can be used with rootless. 
+ srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) + return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW) + } + + err := doLink() + + // if the destination exists, unlink it first and try again + if err != nil && os.IsExist(err) { + unix.Unlinkat(destDirFd, destBase, 0) + return doLink() + } + return err +} + func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { src := fmt.Sprintf("/proc/self/fd/%d", srcFd) st, err := os.Stat(src) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("copy file content for %q: %w", destFile, err) } copyWithFileRange, copyWithFileClone := true, true @@ -83,20 +115,7 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us if err == nil { defer destDir.Close() - doLink := func() error { - // Using unix.AT_EMPTY_PATH requires CAP_DAC_READ_SEARCH while this variant that uses - // /proc/self/fd doesn't and can be used with rootless. - srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd) - return unix.Linkat(unix.AT_FDCWD, srcPath, int(destDir.Fd()), destBase, unix.AT_SYMLINK_FOLLOW) - } - - err := doLink() - - // if the destination exists, unlink it first and try again - if err != nil && os.IsExist(err) { - unix.Unlinkat(int(destDir.Fd()), destBase, 0) - err = doLink() - } + err := doHardLink(srcFd, int(destDir.Fd()), destBase) if err == nil { return nil, st.Size(), nil } @@ -106,63 +125,15 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us // If the destination file already exists, we shouldn't blow it away dstFile, err := openFileUnderRoot(destFile, dirfd, newFileFlags, mode) if err != nil { - return nil, -1, err + return nil, -1, fmt.Errorf("open file %q under rootfs for copy: %w", destFile, err) } err = driversCopy.CopyRegularToFile(src, dstFile, st, ©WithFileRange, ©WithFileClone) if err != nil { dstFile.Close() - return nil, -1, err - } - return dstFile, st.Size(), err -} - -func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata { - maps := make(map[string]map[string][]*internal.FileMetadata) - - for layerID, v := range layersMetadata { - r := make(map[string][]*internal.FileMetadata) - for i := range v { - if v[i].Digest != "" { - r[v[i].Digest] = append(r[v[i].Digest], &v[i]) - } - } - maps[layerID] = r - } - return maps -} - -func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) { - allLayers, err := store.Layers() - if err != nil { - return nil, nil, err - } - - layersMetadata := make(map[string][]internal.FileMetadata) - layersTarget := make(map[string]string) - for _, r := range allLayers { - manifestReader, err := store.LayerBigData(r.ID, bigDataKey) - if err != nil { - continue - } - defer manifestReader.Close() - manifest, err := ioutil.ReadAll(manifestReader) - if err != nil { - return nil, nil, err - } - var toc internal.TOC - if err := json.Unmarshal(manifest, &toc); err != nil { - continue - } - layersMetadata[r.ID] = toc.Entries - target, err := store.DifferTarget(r.ID) - if err != nil { - return nil, nil, err - } - layersTarget[r.ID] = target + return nil, -1, fmt.Errorf("copy to file %q under rootfs: %w", destFile, err) } - - return layersMetadata, layersTarget, nil + return dstFile, st.Size(), nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
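Aside for readers of the hunk above (illustrative, not part of the vendored patch): doHardLink works by having linkat(2) resolve a "/proc/self/fd/<N>" path with AT_SYMLINK_FOLLOW, which lets a rootless process hardlink an already-open descriptor; the AT_EMPTY_PATH variant would require CAP_DAC_READ_SEARCH. A minimal sketch of that primitive on its own, with a hypothetical function name:

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// linkOpenFd creates destBase inside the directory open as destDirFd,
// pointing at the inode that is already open as srcFd.
func linkOpenFd(srcFd int, destDirFd int, destBase string) error {
	// /proc/self/fd/<N> names the open descriptor; AT_SYMLINK_FOLLOW makes
	// linkat dereference that magic symlink to the underlying inode.
	srcPath := fmt.Sprintf("/proc/self/fd/%d", srcFd)
	return unix.Linkat(unix.AT_FDCWD, srcPath, destDirFd, destBase, unix.AT_SYMLINK_FOLLOW)
}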
@@ -179,67 +150,71 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) if err != nil { - return nil, err + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } - layersMetadata, layersTarget, err := getLayersCache(store) + layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ - stream: iss, - manifest: manifest, - layersMetadata: layersMetadata, - layersTarget: layersTarget, - tocOffset: tocOffset, - fileType: fileTypeZstdChunked, + copyBuffer: makeCopyBuffer(), + stream: iss, + manifest: manifest, + layersCache: layersCache, + tocOffset: tocOffset, + fileType: fileTypeZstdChunked, }, nil } func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) if err != nil { - return nil, err + return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } - layersMetadata, layersTarget, err := getLayersCache(store) + layersCache, err := getLayersCache(store) if err != nil { return nil, err } return &chunkedDiffer{ - stream: iss, - manifest: manifest, - layersMetadata: layersMetadata, - layersTarget: layersTarget, - tocOffset: tocOffset, - fileType: fileTypeEstargz, + copyBuffer: makeCopyBuffer(), + stream: iss, + manifest: manifest, + layersCache: layersCache, + tocOffset: tocOffset, + fileType: fileTypeEstargz, }, nil } +func makeCopyBuffer() []byte { + return make([]byte, 2<<20) +} + // copyFileFromOtherLayer copies a file from another layer // file is the file to look for. // source is the path to the source layer checkout. -// otherFile contains the metadata for the file. +// name is the path to the file to copy in source. // dirfd is an open file descriptor to the destination root directory. // useHardLinks defines whether the deduplication can be performed using hard links. -func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) if err != nil { - return false, nil, 0, err + return false, nil, 0, fmt.Errorf("open source file: %w", err) } defer unix.Close(srcDirfd) - srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0) + srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0) if err != nil { - return false, nil, 0, err + return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err) } defer srcFile.Close() dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) if err != nil { - return false, nil, 0, err + return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) } - return true, dstFile, written, err + return true, dstFile, written, nil } // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. 
@@ -275,10 +250,6 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return false } - xattrsToIgnore := map[string]interface{}{ - "security.selinux": true, - } - xattrs := make(map[string]string) for _, x := range listXattrs { v, err := system.Lgetxattr(path, x) @@ -301,45 +272,9 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo return canDedupMetadataWithHardLink(file, &otherFile) } -// findFileInOtherLayers finds the specified file in other layers. -// file is the file to look for. -// dirfd is an open file descriptor to the checkout root directory. -// layersMetadata contains the metadata for each layer in the storage. -// layersTarget maps each layer to its checkout on disk. -// useHardLinks defines whether the deduplication can be performed using hard links. -func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) { - // this is ugly, needs to be indexed - for layerID, checksums := range layersMetadata { - source, ok := layersTarget[layerID] - if !ok { - continue - } - files, found := checksums[file.Digest] - if !found { - continue - } - for _, candidate := range files { - // check if it is a valid candidate to dedup file - if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) { - continue - } - - found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks) - if found && err == nil { - return found, dstFile, written, err - } - } - } - // If hard links deduplication was used and it has failed, try again without hard links. - if useHardLinks { - return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false) - } - return false, nil, 0, nil -} - -func getFileDigest(f *os.File) (digest.Digest, error) { +func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) { digester := digest.Canonical.Digester() - if _, err := io.Copy(digester.Hash(), f); err != nil { + if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil { return "", err } return digester.Digest(), nil @@ -401,7 +336,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di // file is the file to look for. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. -func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) { sourceFile := filepath.Clean(filepath.Join("/", file.Name)) if !strings.HasPrefix(sourceFile, "/usr/") { // limit host deduplication to files under /usr. 
@@ -430,7 +365,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return false, nil, 0, err } - checksum, err := getFileDigest(f) + checksum, err := getFileDigest(f, buf) if err != nil { return false, nil, 0, err } @@ -452,7 +387,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool dstFile.Close() return false, nil, 0, err } - checksum, err = getFileDigest(f) + checksum, err = getFileDigest(f, buf) if err != nil { dstFile.Close() return false, nil, 0, err @@ -464,6 +399,19 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool return true, dstFile, written, nil } +// findFileInOtherLayers finds the specified file in other layers. +// cache is the layers cache to use. +// file is the file to look for. +// dirfd is an open file descriptor to the checkout root directory. +// useHardLinks defines whether the deduplication can be performed using hard links. +func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { + target, name, err := cache.findFileInOtherLayers(file, useHardLinks) + if err != nil || name == "" { + return false, nil, 0, err + } + return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) +} + func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil @@ -490,22 +438,50 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption return nil } -type missingFile struct { - File *internal.FileMetadata +type originFile struct { + Root string + Path string + Offset int64 +} + +type missingFileChunk struct { Gap int64 + Hole bool + + File *internal.FileMetadata + + CompressedSize int64 + UncompressedSize int64 } -func (m missingFile) Length() int64 { - return m.File.EndOffset - m.File.Offset +type missingPart struct { + Hole bool + SourceChunk *ImageSourceChunk + OriginFile *originFile + Chunks []missingFileChunk } -type missingChunk struct { - RawChunk ImageSourceChunk - Files []missingFile +func (o *originFile) OpenFile() (io.ReadCloser, error) { + srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file: %w", err) + } + defer unix.Close(srcDirfd) + + srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0) + if err != nil { + return nil, fmt.Errorf("open source file under target rootfs: %w", err) + } + + if _, err := srcFile.Seek(o.Offset, 0); err != nil { + srcFile.Close() + return nil, err + } + return srcFile, nil } // setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { if file == nil || file.Fd() < 0 { return errors.Errorf("invalid file") } @@ -515,209 +491,600 @@ func setFileAttrs(file *os.File, mode os.FileMode, metadata *internal.FileMetada if err != nil { return err } + + // If it is a symlink, force to use the path if t == tar.TypeSymlink { - return nil + usePath = true } - if err := unix.Fchown(fd, metadata.UID, metadata.GID); err != nil { + baseName := "" + if usePath { + dirName := filepath.Dir(metadata.Name) + if dirName != "" { + parentFd, err := openFileUnderRoot(dirName, dirfd, 
unix.O_PATH|unix.O_DIRECTORY, 0) + if err != nil { + return err + } + defer parentFd.Close() + + dirfd = int(parentFd.Fd()) + } + baseName = filepath.Base(metadata.Name) + } + + doChown := func() error { + if usePath { + return unix.Fchownat(dirfd, baseName, metadata.UID, metadata.GID, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchown(fd, metadata.UID, metadata.GID) + } + + doSetXattr := func(k string, v []byte) error { + return unix.Fsetxattr(fd, k, v, 0) + } + + doUtimes := func() error { + ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} + if usePath { + return unix.UtimesNanoAt(dirfd, baseName, ts, unix.AT_SYMLINK_NOFOLLOW) + } + return unix.UtimesNanoAt(unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", fd), ts, 0) + } + + doChmod := func() error { + if usePath { + return unix.Fchmodat(dirfd, baseName, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + } + return unix.Fchmod(fd, uint32(mode)) + } + + if err := doChown(); err != nil { if !options.IgnoreChownErrors { - return err + return fmt.Errorf("chown %q to %d:%d: %w", metadata.Name, metadata.UID, metadata.GID, err) } } + canIgnore := func(err error) bool { + return err == nil || errors.Is(err, unix.ENOSYS) || errors.Is(err, unix.ENOTSUP) + } + for k, v := range metadata.Xattrs { + if _, found := xattrsToIgnore[k]; found { + continue + } data, err := base64.StdEncoding.DecodeString(v) if err != nil { - return err + return fmt.Errorf("decode xattr %q: %w", v, err) } - if err := unix.Fsetxattr(fd, k, data, 0); err != nil { - return err + if err := doSetXattr(k, data); !canIgnore(err) { + return fmt.Errorf("set xattr %s=%q for %q: %w", k, data, metadata.Name, err) } } - ts := []unix.Timespec{timeToTimespec(metadata.AccessTime), timeToTimespec(metadata.ModTime)} - if err := unix.UtimesNanoAt(fd, "", ts, 0); err != nil && errors.Is(err, unix.ENOSYS) { - return err + if err := doUtimes(); !canIgnore(err) { + return fmt.Errorf("set utimes for %q: %w", metadata.Name, err) } - if err := unix.Fchmod(fd, uint32(mode)); err != nil { - return err + if err := doChmod(); !canIgnore(err) { + return fmt.Errorf("chmod %q: %w", metadata.Name, err) } return nil } -// openFileUnderRoot safely opens a file under the specified root directory using openat2 -// name is the path to open relative to dirfd. -// dirfd is an open file descriptor to the target checkout directory. -// flags are the flags top pass to the open syscall. -// mode specifies the mode to use for newly created files. -func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { +func openFileUnderRootFallback(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + root := fmt.Sprintf("/proc/self/fd/%d", dirfd) + + targetRoot, err := os.Readlink(root) + if err != nil { + return -1, err + } + + hasNoFollow := (flags & unix.O_NOFOLLOW) != 0 + + fd := -1 + // If O_NOFOLLOW is specified in the flags, then resolve only the parent directory and use the + // last component as the path to openat(). 
+ if hasNoFollow { + dirName := filepath.Dir(name) + if dirName != "" { + newRoot, err := securejoin.SecureJoin(root, filepath.Dir(name)) + if err != nil { + return -1, err + } + root = newRoot + } + + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return -1, err + } + defer unix.Close(parentDirfd) + + fd, err = unix.Openat(parentDirfd, filepath.Base(name), int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } else { + newPath, err := securejoin.SecureJoin(root, name) + if err != nil { + return -1, err + } + fd, err = unix.Openat(dirfd, newPath, int(flags), uint32(mode)) + if err != nil { + return -1, err + } + } + + target, err := os.Readlink(fmt.Sprintf("/proc/self/fd/%d", fd)) + if err != nil { + unix.Close(fd) + return -1, err + } + + // Add an additional check to make sure the opened fd is inside the rootfs + if !strings.HasPrefix(target, targetRoot) { + unix.Close(fd) + return -1, fmt.Errorf("error while resolving %q. It resolves outside the root directory", name) + } + + return fd, err +} + +func openFileUnderRootOpenat2(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { how := unix.OpenHow{ Flags: flags, Mode: uint64(mode & 07777), Resolve: unix.RESOLVE_IN_ROOT, } + return unix.Openat2(dirfd, name, &how) +} - fd, err := unix.Openat2(dirfd, name, &how) - if err != nil { - return nil, err +// skipOpenat2 is set when openat2 is not supported by the underlying kernel and avoid +// using it again. +var skipOpenat2 int32 + +// openFileUnderRootRaw tries to open a file using openat2 and if it is not supported fallbacks to a +// userspace lookup. +func openFileUnderRootRaw(dirfd int, name string, flags uint64, mode os.FileMode) (int, error) { + var fd int + var err error + if atomic.LoadInt32(&skipOpenat2) > 0 { + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } else { + fd, err = openFileUnderRootOpenat2(dirfd, name, flags, mode) + // If the function failed with ENOSYS, switch off the support for openat2 + // and fallback to using safejoin. + if err != nil && errors.Is(err, unix.ENOSYS) { + atomic.StoreInt32(&skipOpenat2, 1) + fd, err = openFileUnderRootFallback(dirfd, name, flags, mode) + } } - return os.NewFile(uintptr(fd), name), nil + return fd, err } -func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) { - file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) - if err != nil { - return err - } - defer func() { - err2 := file.Close() - if err == nil { - err = err2 +// openFileUnderRoot safely opens a file under the specified root directory using openat2 +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// flags are the flags to pass to the open syscall. +// mode specifies the mode to use for newly created files. 
+func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + hasCreate := (flags & unix.O_CREAT) != 0 + if errors.Is(err, unix.ENOENT) && hasCreate { + parent := filepath.Dir(name) + if parent != "" { + newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0) + if err2 == nil { + defer newDirfd.Close() + fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + } } - }() + } + return nil, fmt.Errorf("open %q under the rootfs: %w", name, err) +} - digester := digest.Canonical.Digester() - checksum := digester.Hash() - to := io.MultiWriter(file, checksum) +// openOrCreateDirUnderRoot safely opens a directory or create it if it is missing. +// name is the path to open relative to dirfd. +// dirfd is an open file descriptor to the target checkout directory. +// mode specifies the mode to use for newly created files. +func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.File, error) { + fd, err := openFileUnderRootRaw(dirfd, name, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } + + if errors.Is(err, unix.ENOENT) { + parent := filepath.Dir(name) + if parent != "" { + pDir, err2 := openOrCreateDirUnderRoot(parent, dirfd, mode) + if err2 != nil { + return nil, err + } + defer pDir.Close() - switch c.fileType { - case fileTypeZstdChunked: - z, err := zstd.NewReader(reader) - if err != nil { - return err - } - defer z.Close() + baseName := filepath.Base(name) - if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil { - return err + if err2 := unix.Mkdirat(int(pDir.Fd()), baseName, 0755); err2 != nil { + return nil, err + } + + fd, err = openFileUnderRootRaw(int(pDir.Fd()), baseName, unix.O_DIRECTORY|unix.O_RDONLY, mode) + if err == nil { + return os.NewFile(uintptr(fd), name), nil + } } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { - return err + } + return nil, err +} + +func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) { + switch { + case partCompression == fileTypeHole: + // The entire part is a hole. Do not need to read from a file. + c.rawReader = nil + return fileTypeHole, nil + case mf.Hole: + // Only the missing chunk in the requested part refers to a hole. + // The received data must be discarded. 
+ limitReader := io.LimitReader(from, mf.CompressedSize) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) + return fileTypeHole, err + case partCompression == fileTypeZstdChunked: + c.rawReader = io.LimitReader(from, mf.CompressedSize) + if c.zstdReader == nil { + r, err := zstd.NewReader(c.rawReader) + if err != nil { + return partCompression, err + } + c.zstdReader = r + } else { + if err := c.zstdReader.Reset(c.rawReader); err != nil { + return partCompression, err + } } - case fileTypeEstargz: + case partCompression == fileTypeEstargz: + c.rawReader = io.LimitReader(from, mf.CompressedSize) if c.gzipReader == nil { - r, err := pgzip.NewReader(reader) + r, err := pgzip.NewReader(c.rawReader) if err != nil { - return err + return partCompression, err } c.gzipReader = r } else { - if err := c.gzipReader.Reset(reader); err != nil { - return err + if err := c.gzipReader.Reset(c.rawReader); err != nil { + return partCompression, err } } - defer c.gzipReader.Close() + case partCompression == fileTypeNoCompression: + c.rawReader = io.LimitReader(from, mf.UncompressedSize) + default: + return partCompression, fmt.Errorf("unknown file type %q", c.fileType) + } + return partCompression, nil +} - if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil { +// hashHole writes SIZE zeros to the specified hasher +func hashHole(h hash.Hash, size int64, copyBuffer []byte) error { + count := int64(len(copyBuffer)) + if size < count { + count = size + } + for i := int64(0); i < count; i++ { + copyBuffer[i] = 0 + } + for size > 0 { + count = int64(len(copyBuffer)) + if size < count { + count = size + } + if _, err := h.Write(copyBuffer[:count]); err != nil { return err } - if _, err := io.Copy(ioutil.Discard, reader); err != nil { + size -= count + } + return nil +} + +// appendHole creates a hole with the specified size at the open fd. +func appendHole(fd int, size int64) error { + off, err := unix.Seek(fd, size, unix.SEEK_CUR) + if err != nil { + return err + } + // Make sure the file size is changed. It might be the last hole and no other data written afterwards. 
+ if err := unix.Ftruncate(fd, off); err != nil { + return err + } + return nil +} + +func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error { + switch compression { + case fileTypeZstdChunked: + defer c.zstdReader.Reset(nil) + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeEstargz: + defer c.gzipReader.Close() + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeNoCompression: + if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil { + return err + } + case fileTypeHole: + if err := appendHole(int(destFile.file.Fd()), size); err != nil { + return err + } + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { return err } default: return fmt.Errorf("unknown file type %q", c.fileType) } + return nil +} + +type destinationFile struct { + dirfd int + file *os.File + digester digest.Digester + hash hash.Hash + to io.Writer + metadata *internal.FileMetadata + options *archive.TarOptions +} + +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { + file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) + if err != nil { + return nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + to := io.MultiWriter(file, hash) + + return &destinationFile{ + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + }, nil +} - manifestChecksum, err := digest.Parse(metadata.Digest) +func (d *destinationFile) Close() error { + manifestChecksum, err := digest.Parse(d.metadata.Digest) if err != nil { return err } - if digester.Digest() != manifestChecksum { - return fmt.Errorf("checksum mismatch for %q", dest) + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) } - return setFileAttrs(file, mode, metadata, options) + + return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) } -func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { - for mc := 0; ; mc++ { - var part io.ReadCloser - select { - case p := <-streams: - part = p - case err := <-errs: - return err - } - if part == nil { - if mc == len(missingChunks) { - break +func closeDestinationFiles(files chan *destinationFile, errors chan error) { + for f := range files { + errors <- f.Close() + } + close(errors) +} + +func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { + var destFile *destinationFile + + filesToClose := make(chan *destinationFile, 3) + closeFilesErrors := make(chan error, 2) + + go closeDestinationFiles(filesToClose, closeFilesErrors) + defer func() { + close(filesToClose) + for e := range closeFilesErrors { + if e != nil && Err == nil { + Err = e } - return errors.Errorf("invalid stream returned") } - if mc == len(missingChunks) { - part.Close() - return errors.Errorf("too many chunks returned") + }() + + for _, missingPart := range missingParts { + var part 
io.ReadCloser + partCompression := c.fileType + switch { + case missingPart.Hole: + partCompression = fileTypeHole + case missingPart.OriginFile != nil: + var err error + part, err = missingPart.OriginFile.OpenFile() + if err != nil { + return err + } + partCompression = fileTypeNoCompression + case missingPart.SourceChunk != nil: + select { + case p := <-streams: + part = p + case err := <-errs: + return err + } + if part == nil { + return errors.Errorf("invalid stream returned") + } + default: + return errors.Errorf("internal error: missing part misses both local and remote data stream") } - for _, mf := range missingChunks[mc].Files { + for _, mf := range missingPart.Chunks { if mf.Gap > 0 { limitReader := io.LimitReader(part, mf.Gap) - _, err := io.Copy(ioutil.Discard, limitReader) + _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer) if err != nil { - part.Close() - return err + Err = err + goto exit } continue } - limitReader := io.LimitReader(part, mf.Length()) + if mf.File.Name == "" { + Err = errors.Errorf("file name empty") + goto exit + } - if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil { - part.Close() - return err + compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf) + if err != nil { + Err = err + goto exit + } + + // Open the new file if it is different that what is already + // opened + if destFile == nil || destFile.metadata.Name != mf.File.Name { + var err error + if destFile != nil { + cleanup: + for { + select { + case err = <-closeFilesErrors: + if err != nil { + Err = err + goto exit + } + default: + break cleanup + } + } + filesToClose <- destFile + } + destFile, err = openDestinationFile(dirfd, mf.File, options) + if err != nil { + Err = err + goto exit + } + } + + if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil { + Err = err + goto exit + } + if c.rawReader != nil { + if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil { + Err = err + goto exit + } + } + } + exit: + if part != nil { + part.Close() + if Err != nil { + break } } - part.Close() } + + if destFile != nil { + return destFile.Close() + } + return nil } -func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk { - if len(missingChunks) <= target { - return missingChunks +func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { + getGap := func(missingParts []missingPart, i int) int { + prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length + return int(missingParts[i].SourceChunk.Offset - prev) + } + getCost := func(missingParts []missingPart, i int) int { + cost := getGap(missingParts, i) + if missingParts[i-1].OriginFile != nil { + cost += int(missingParts[i-1].SourceChunk.Length) + } + if missingParts[i].OriginFile != nil { + cost += int(missingParts[i].SourceChunk.Length) + } + return cost + } + + // simple case: merge chunks from the same file. 
+ newMissingParts := missingParts[0:1] + prevIndex := 0 + for i := 1; i < len(missingParts); i++ { + gap := getGap(missingParts, i) + if gap == 0 && missingParts[prevIndex].OriginFile == nil && + missingParts[i].OriginFile == nil && + !missingParts[prevIndex].Hole && !missingParts[i].Hole && + len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && + missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { + missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize + missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize + } else { + newMissingParts = append(newMissingParts, missingParts[i]) + prevIndex++ + } } + missingParts = newMissingParts - getGap := func(missingChunks []missingChunk, i int) int { - prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length - return int(missingChunks[i].RawChunk.Offset - prev) + if len(missingParts) <= target { + return missingParts } // this implementation doesn't account for duplicates, so it could merge // more than necessary to reach the specified target. Since target itself // is a heuristic value, it doesn't matter. - var gaps []int - for i := 1; i < len(missingChunks); i++ { - gaps = append(gaps, getGap(missingChunks, i)) + costs := make([]int, len(missingParts)-1) + for i := 1; i < len(missingParts); i++ { + costs[i-1] = getCost(missingParts, i) } - sort.Ints(gaps) + sort.Ints(costs) - toShrink := len(missingChunks) - target - targetValue := gaps[toShrink-1] + toShrink := len(missingParts) - target + if toShrink >= len(costs) { + toShrink = len(costs) - 1 + } + targetValue := costs[toShrink] - newMissingChunks := missingChunks[0:1] - for i := 1; i < len(missingChunks); i++ { - gap := getGap(missingChunks, i) - if gap > targetValue { - newMissingChunks = append(newMissingChunks, missingChunks[i]) + newMissingParts = missingParts[0:1] + for i := 1; i < len(missingParts); i++ { + if getCost(missingParts, i) > targetValue { + newMissingParts = append(newMissingParts, missingParts[i]) } else { - prev := &newMissingChunks[len(newMissingChunks)-1] - prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length + gap := getGap(missingParts, i) + prev := &newMissingParts[len(newMissingParts)-1] + prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length + prev.Hole = false + prev.OriginFile = nil if gap > 0 { - gapFile := missingFile{ + gapFile := missingFileChunk{ Gap: int64(gap), } - prev.Files = append(prev.Files, gapFile) + prev.Chunks = append(prev.Chunks, gapFile) } - prev.Files = append(prev.Files, missingChunks[i].Files...) + prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...) } } - return newMissingChunks + return newMissingParts } -func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error { +func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk - for _, c := range missingChunks { - chunksToRequest = append(chunksToRequest, c.RawChunk) + for _, c := range missingParts { + if c.OriginFile == nil && !c.Hole { + chunksToRequest = append(chunksToRequest, *c.SourceChunk) + } } // There are some missing files. Prepare a multirange request for the missing chunks. 
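Aside (illustrative, not part of the vendored patch): mergeMissingChunks above bounds the number of byte ranges requested from the registry by coalescing nearby missing parts, while trying to keep parts that can be served from local origin files or holes unmerged. A simplified sketch of the core coalescing step, with hypothetical names, assuming sorted non-overlapping ranges and a fixed gap threshold:

type byteRange struct {
	Offset uint64
	Length uint64
}

// mergeRanges merges adjacent ranges whose gap is at most maxGap bytes.
// It assumes ranges is sorted by Offset and non-overlapping.
func mergeRanges(ranges []byteRange, maxGap uint64) []byteRange {
	if len(ranges) == 0 {
		return nil
	}
	out := []byteRange{ranges[0]}
	for _, r := range ranges[1:] {
		last := &out[len(out)-1]
		gap := r.Offset - (last.Offset + last.Length)
		if gap <= maxGap {
			// Extend the previous range so it also covers the gap and r;
			// the extra gap bytes are fetched and discarded, like the Gap chunks above.
			last.Length += gap + r.Length
		} else {
			out = append(out, r)
		}
	}
	return out
}

// Example: mergeRanges([]byteRange{{0, 10}, {12, 5}, {100, 4}}, 4)
// returns [{0, 17}, {100, 4}]: the first two ranges collapse into one request.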
@@ -731,32 +1098,32 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun } if _, ok := err.(ErrBadRequest); ok { - requested := len(missingChunks) + requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. if requested < 64 { return err } // Merge more chunks to request - missingChunks = mergeMissingChunks(missingChunks, requested/2) + missingParts = mergeMissingChunks(missingParts, requested/2) continue } return err } - if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil { + if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil { return err } return nil } -func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - parent := filepath.Dir(metadata.Name) - base := filepath.Base(metadata.Name) +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { + parent := filepath.Dir(name) + base := filepath.Base(name) parentFd := dirfd if parent != "." { - parentFile, err := openFileUnderRoot(parent, dirfd, unix.O_DIRECTORY|unix.O_PATH|unix.O_RDONLY, 0) + parentFile, err := openOrCreateDirUnderRoot(parent, dirfd, 0) if err != nil { return err } @@ -766,21 +1133,21 @@ func safeMkdir(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opt if err := unix.Mkdirat(parentFd, base, uint32(mode)); err != nil { if !os.IsExist(err) { - return err + return fmt.Errorf("mkdir %q: %w", name, err) } } - file, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_RDONLY, 0) + file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0) if err != nil { return err } defer file.Close() - return setFileAttrs(file, mode, metadata, options) + return setFileAttrs(dirfd, file, mode, metadata, options, false) } func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { - sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_RDONLY, 0) + sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) if err != nil { return err } @@ -789,7 +1156,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -797,25 +1164,35 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti destDirFd = int(f.Fd()) } - err = unix.Linkat(int(sourceFile.Fd()), "", destDirFd, destBase, unix.AT_EMPTY_PATH) + err = doHardLink(int(sourceFile.Fd()), destDirFd, destBase) if err != nil { - return err + return fmt.Errorf("create hardlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) } - newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY, 0) + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_WRONLY|unix.O_NOFOLLOW, 0) if err != nil { + // If the target is a symlink, open the file with O_PATH. 
+ if errors.Is(err, unix.ELOOP) { + newFile, err := openFileUnderRoot(metadata.Name, dirfd, unix.O_PATH|unix.O_NOFOLLOW, 0) + if err != nil { + return err + } + defer newFile.Close() + + return setFileAttrs(dirfd, newFile, mode, metadata, options, true) + } return err } defer newFile.Close() - return setFileAttrs(newFile, mode, metadata, options) + return setFileAttrs(dirfd, newFile, mode, metadata, options, false) } func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { - f, err := openFileUnderRoot(destDir, dirfd, unix.O_RDONLY, 0) + f, err := openOrCreateDirUnderRoot(destDir, dirfd, 0) if err != nil { return err } @@ -823,7 +1200,10 @@ func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, o destDirFd = int(f.Fd()) } - return unix.Symlinkat(metadata.Linkname, destDirFd, destBase) + if err := unix.Symlinkat(metadata.Linkname, destDirFd, destBase); err != nil { + return fmt.Errorf("create symlink %q pointing to %q: %w", metadata.Name, metadata.Linkname, err) + } + return nil } type whiteoutHandler struct { @@ -832,13 +1212,16 @@ type whiteoutHandler struct { } func (d whiteoutHandler) Setxattr(path, name string, value []byte) error { - file, err := openFileUnderRoot(path, d.Dirfd, unix.O_RDONLY, 0) + file, err := openOrCreateDirUnderRoot(path, d.Dirfd, 0) if err != nil { return err } defer file.Close() - return unix.Fsetxattr(int(file.Fd()), name, value, 0) + if err := unix.Fsetxattr(int(file.Fd()), name, value, 0); err != nil { + return fmt.Errorf("set xattr %s=%q for %q: %w", name, value, path, err) + } + return nil } func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { @@ -847,7 +1230,7 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd := d.Dirfd if dir != "" { - dir, err := openFileUnderRoot(dir, d.Dirfd, unix.O_RDONLY, 0) + dir, err := openOrCreateDirUnderRoot(dir, d.Dirfd, 0) if err != nil { return err } @@ -856,12 +1239,16 @@ func (d whiteoutHandler) Mknod(path string, mode uint32, dev int) error { dirfd = int(dir.Fd()) } - return unix.Mknodat(dirfd, base, mode, dev) + if err := unix.Mknodat(dirfd, base, mode, dev); err != nil { + return fmt.Errorf("mknod %q: %w", path, err) + } + + return nil } func checkChownErr(err error, name string, uid, gid int) error { if errors.Is(err, syscall.EINVAL) { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err) } return err } @@ -899,7 +1286,69 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo return def } +type findAndCopyFileOptions struct { + useHardLinks bool + enableHostDedup bool + ostreeRepos []string + options *archive.TarOptions +} + +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { + finalizeFile := func(dstFile *os.File) error { + if dstFile != nil { + defer dstFile.Close() + if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { + return err + } + } + return nil + } + + found, 
dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + + if copyOptions.enableHostDedup { + found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer) + if err != nil { + return false, err + } + if found { + if err := finalizeFile(dstFile); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { + defer c.layersCache.release() + defer func() { + if c.zstdReader != nil { + c.zstdReader.Close() + } + }() + bigData := map[string][]byte{ bigDataKey: c.manifest, } @@ -927,14 +1376,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") // Generate the manifest - var toc internal.TOC - if err := json.Unmarshal(c.manifest, &toc); err != nil { + toc, err := unmarshalToc(c.manifest) + if err != nil { return output, err } whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) - var missingChunks []missingChunk + var missingParts []missingPart mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { @@ -956,17 +1405,61 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra dirfd, err := unix.Open(dest, unix.O_RDONLY|unix.O_PATH, 0) if err != nil { - return output, err + return output, fmt.Errorf("cannot open %q: %w", dest, err) } defer unix.Close(dirfd) - otherLayersCache := prepareOtherLayersCache(c.layersMetadata) - // hardlinks can point to missing files. 
So create them after all files // are retrieved var hardLinks []hardLinkToCreate - missingChunksSize, totalChunksSize := int64(0), int64(0) + missingPartsSize, totalChunksSize := int64(0), int64(0) + + copyOptions := findAndCopyFileOptions{ + useHardLinks: useHardLinks, + enableHostDedup: enableHostDedup, + ostreeRepos: ostreeRepos, + options: options, + } + + type copyFileJob struct { + njob int + index int + mode os.FileMode + metadata *internal.FileMetadata + + found bool + err error + } + + var wg sync.WaitGroup + + copyResults := make([]copyFileJob, len(mergedEntries)) + + copyFileJobs := make(chan copyFileJob) + defer func() { + if copyFileJobs != nil { + close(copyFileJobs) + } + wg.Wait() + }() + + for i := 0; i < copyGoRoutines; i++ { + wg.Add(1) + jobs := copyFileJobs + + go func() { + defer wg.Done() + for job := range jobs { + found, err := c.findAndCopyFile(dirfd, job.metadata, ©Options, job.mode) + job.err = err + job.found = found + copyResults[job.njob] = job + } + }() + } + + filesToWaitFor := 0 for i, r := range mergedEntries { if options.ForceMask != nil { value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777) @@ -1016,7 +1509,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra return err } defer file.Close() - if err := setFileAttrs(file, mode, &r, options); err != nil { + if err := setFileAttrs(dirfd, file, mode, &r, options, false); err != nil { return err } return nil @@ -1028,7 +1521,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } case tar.TypeDir: - if err := safeMkdir(dirfd, mode, &r, options); err != nil { + if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } continue @@ -1062,74 +1555,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra totalChunksSize += r.Size - finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dstFile, mode, &r, options); err != nil { - return err - } + if t == tar.TypeReg { + index := i + njob := filesToWaitFor + job := copyFileJob{ + mode: mode, + metadata: &mergedEntries[index], + index: index, + njob: njob, } - return nil + copyFileJobs <- job + filesToWaitFor++ } + } - found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue - } + close(copyFileJobs) + copyFileJobs = nil - found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks) - if err != nil { - return output, err + wg.Wait() + + for _, res := range copyResults[:filesToWaitFor] { + if res.err != nil { + return output, res.err } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } + // the file was already copied to its destination + // so nothing left to do. + if res.found { continue } - if enableHostDedup { - found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks) - if err != nil { - return output, err - } - if found { - if err := finalizeFile(dstFile); err != nil { - return output, err - } - continue + r := &mergedEntries[res.index] + + missingPartsSize += r.Size + + remainingSize := r.Size + + // the file is missing, attempt to find individual chunks. 
+ for _, chunk := range r.Chunks { + compressedSize := int64(chunk.EndOffset - chunk.Offset) + size := remainingSize + if chunk.ChunkSize > 0 { + size = chunk.ChunkSize } - } + remainingSize = remainingSize - size - missingChunksSize += r.Size - if t == tar.TypeReg { rawChunk := ImageSourceChunk{ - Offset: uint64(r.Offset), - Length: uint64(r.EndOffset - r.Offset), + Offset: uint64(chunk.Offset), + Length: uint64(compressedSize), } - - file := missingFile{ - File: &mergedEntries[i], + file := missingFileChunk{ + File: &mergedEntries[res.index], + CompressedSize: compressedSize, + UncompressedSize: size, } - - missingChunks = append(missingChunks, missingChunk{ - RawChunk: rawChunk, - Files: []missingFile{ + mp := missingPart{ + SourceChunk: &rawChunk, + Chunks: []missingFileChunk{ file, }, - }) + } + + switch chunk.ChunkType { + case internal.ChunkTypeData: + root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk) + if err != nil { + return output, err + } + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + missingPartsSize -= size + mp.OriginFile = &originFile{ + Root: root, + Path: path, + Offset: offset, + } + } + case internal.ChunkTypeZeros: + missingPartsSize -= size + mp.Hole = true + // Mark all chunks belonging to the missing part as holes + for i := range mp.Chunks { + mp.Chunks[i].Hole = true + } + } + missingParts = append(missingParts, mp) } } // There are some missing files. Prepare a multirange request for the missing chunks. - if len(missingChunks) > 0 { - missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks) - if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil { + if len(missingParts) > 0 { + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) + if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { return output, err } } @@ -1141,31 +1655,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } if totalChunksSize > 0 { - logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize)) + logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } return output, nil } +func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { + // ignore the metadata files for the estargz format. + if fileType != fileTypeEstargz { + return false + } + switch e.Name { + // ignore the metadata files for the estargz format. + case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName: + return true + } + return false +} + func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) { - var mergedEntries []internal.FileMetadata - var prevEntry *internal.FileMetadata - for _, entry := range entries { - e := entry + countNextChunks := func(start int) int { + count := 0 + for _, e := range entries[start:] { + if e.Type != TypeChunk { + return count + } + count++ + } + return count + } - // ignore the metadata files for the estargz format. 
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) { + size := 0 + for _, entry := range entries { + if mustSkipFile(fileType, entry) { continue } + if entry.Type != TypeChunk { + size++ + } + } + mergedEntries := make([]internal.FileMetadata, size) + m := 0 + for i := 0; i < len(entries); i++ { + e := entries[i] + if mustSkipFile(fileType, e) { + continue + } if e.Type == TypeChunk { - if prevEntry == nil || prevEntry.Type != TypeReg { - return nil, errors.New("chunk type without a regular file") + return nil, fmt.Errorf("chunk type without a regular file") + } + + if e.Type == TypeReg { + nChunks := countNextChunks(i + 1) + + e.Chunks = make([]*internal.FileMetadata, nChunks+1) + for j := 0; j <= nChunks; j++ { + e.Chunks[j] = &entries[i+j] + e.EndOffset = entries[i+j].EndOffset } - prevEntry.EndOffset = e.EndOffset - continue + i += nChunks } - mergedEntries = append(mergedEntries, e) - prevEntry = &e + mergedEntries[m] = e + m++ } // stargz/estargz doesn't store EndOffset so let's calculate it here lastOffset := c.tocOffset @@ -1176,6 +1728,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if mergedEntries[i].Offset != 0 { lastOffset = mergedEntries[i].Offset } + + lastChunkOffset := mergedEntries[i].EndOffset + for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { + mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset + lastChunkOffset = mergedEntries[i].Chunks[j].Offset + } } return mergedEntries, nil } + +// validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the +// same digest as chunk.ChunkDigest +func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { + parentDirfd, err := unix.Open(root, unix.O_PATH, 0) + if err != nil { + return false + } + defer unix.Close(parentDirfd) + + fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0) + if err != nil { + return false + } + defer fd.Close() + + if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil { + return false + } + + r := io.LimitReader(fd, chunk.ChunkSize) + digester := digest.Canonical.Digester() + + if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil { + return false + } + + digest, err := digest.Parse(chunk.ChunkDigest) + if err != nil { + return false + } + + return digester.Digest() == digest +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index e6622cf1466..f6e0cfcfe86 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -12,109 +12,109 @@ type ThinpoolOptionsConfig struct { // grown. This is specified in terms of % of pool size. So a value of // 20 means that when threshold is hit, pool will be grown by 20% of // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent"` + AutoExtendPercent string `toml:"autoextend_percent,omitempty"` // AutoExtendThreshold determines the pool extension threshold in terms // of percentage of pool size. For example, if threshold is 60, that // means when pool is 60% full, threshold has been hit. 
- AutoExtendThreshold string `toml:"autoextend_threshold"` + AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"` // BaseSize specifies the size to use when creating the base device, // which limits the size of images and containers. - BaseSize string `toml:"basesize"` + BaseSize string `toml:"basesize,omitempty"` // BlockSize specifies a custom blocksize to use for the thin pool. - BlockSize string `toml:"blocksize"` + BlockSize string `toml:"blocksize,omitempty"` // DirectLvmDevice specifies a custom block storage device to use for // the thin pool. - DirectLvmDevice string `toml:"directlvm_device"` + DirectLvmDevice string `toml:"directlvm_device,omitempty"` // DirectLvmDeviceForcewipes device even if device already has a // filesystem - DirectLvmDeviceForce string `toml:"directlvm_device_force"` + DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"` // Fs specifies the filesystem type to use for the base device. - Fs string `toml:"fs"` + Fs string `toml:"fs,omitempty"` // log_level sets the log level of devicemapper. - LogLevel string `toml:"log_level"` + LogLevel string `toml:"log_level,omitempty"` // MetadataSize specifies the size of the metadata for the thinpool // It will be used with the `pvcreate --metadata` option. - MetadataSize string `toml:"metadatasize"` + MetadataSize string `toml:"metadatasize,omitempty"` // MinFreeSpace specifies the min free space percent in a thin pool // require for new device creation to - MinFreeSpace string `toml:"min_free_space"` + MinFreeSpace string `toml:"min_free_space,omitempty"` // MkfsArg specifies extra mkfs arguments to be used when creating the // basedevice. - MkfsArg string `toml:"mkfsarg"` + MkfsArg string `toml:"mkfsarg,omitempty"` // MountOpt specifies extra mount options used when mounting the thin // devices. - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // UseDeferredDeletion marks device for deferred deletion - UseDeferredDeletion string `toml:"use_deferred_deletion"` + UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"` // UseDeferredRemoval marks device for deferred removal - UseDeferredRemoval string `toml:"use_deferred_removal"` + UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"` // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of // retries XFS should attempt to complete IO when ENOSPC (no space) // error is returned by underlying storage device. - XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"` + XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"` } type AufsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` } type BtrfsOptionsConfig struct { // MinSpace is the minimal spaces allocated to the device - MinSpace string `toml:"min_space"` + MinSpace string `toml:"min_space,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } type OverlayOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. 
- IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // Inodes is used to set a maximum inodes of the container image. - Inodes string `toml:"inodes"` + Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories - ForceMask string `toml:"force_mask"` + ForceMask string `toml:"force_mask,omitempty"` } type VfsOptionsConfig struct { // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` } type ZfsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // Name is the File System name of the ZFS File system - Name string `toml:"fsname"` + Name string `toml:"fsname,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` } // OptionsConfig represents the "storage.options" TOML config table. @@ -122,82 +122,82 @@ type OptionsConfig struct { // AdditionalImagesStores is the location of additional read/only // Image stores. Usually used to access Networked File System // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores"` + AdditionalImageStores []string `toml:"additionalimagestores,omitempty"` // AdditionalLayerStores is the location of additional read/only // Layer stores. Usually used to access Networked File System // for shared image content // This API is experimental and can be changed without bumping the // major version number. - AdditionalLayerStores []string `toml:"additionallayerstores"` + AdditionalLayerStores []string `toml:"additionallayerstores,omitempty"` // Size - Size string `toml:"size"` + Size string `toml:"size,omitempty"` // RemapUIDs is a list of default UID mappings to use for layers. - RemapUIDs string `toml:"remap-uids"` + RemapUIDs string `toml:"remap-uids,omitempty"` // RemapGIDs is a list of default GID mappings to use for layers. - RemapGIDs string `toml:"remap-gids"` + RemapGIDs string `toml:"remap-gids,omitempty"` // IgnoreChownErrors is a flag for whether chown errors should be // ignored when building an image. - IgnoreChownErrors string `toml:"ignore_chown_errors"` + IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. - ForceMask os.FileMode `toml:"force_mask"` + ForceMask os.FileMode `toml:"force_mask,omitempty"` // RemapUser is the name of one or more entries in /etc/subuid which // should be used to set up default UID mappings. - RemapUser string `toml:"remap-user"` + RemapUser string `toml:"remap-user,omitempty"` // RemapGroup is the name of one or more entries in /etc/subgid which // should be used to set up default GID mappings. 
- RemapGroup string `toml:"remap-group"` + RemapGroup string `toml:"remap-group,omitempty"` // RootAutoUsernsUser is the name of one or more entries in /etc/subuid and // /etc/subgid which should be used to set up automatically a userns. - RootAutoUsernsUser string `toml:"root-auto-userns-user"` + RootAutoUsernsUser string `toml:"root-auto-userns-user,omitempty"` // AutoUsernsMinSize is the minimum size for a user namespace that is // created automatically. - AutoUsernsMinSize uint32 `toml:"auto-userns-min-size"` + AutoUsernsMinSize uint32 `toml:"auto-userns-min-size,omitempty"` // AutoUsernsMaxSize is the maximum size for a user namespace that is // created automatically. - AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size"` + AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"` // Aufs container options to be handed to aufs drivers - Aufs struct{ AufsOptionsConfig } `toml:"aufs"` + Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"` // Btrfs container options to be handed to btrfs drivers - Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"` + Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` // Overlay container options to be handed to overlay drivers - Overlay struct{ OverlayOptionsConfig } `toml:"overlay"` + Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` // Vfs container options to be handed to VFS drivers - Vfs struct{ VfsOptionsConfig } `toml:"vfs"` + Vfs struct{ VfsOptionsConfig } `toml:"vfs,omitempty"` // Zfs container options to be handed to ZFS drivers - Zfs struct{ ZfsOptionsConfig } `toml:"zfs"` + Zfs struct{ ZfsOptionsConfig } `toml:"zfs,omitempty"` // Do not create a bind mount on the storage home - SkipMountHome string `toml:"skip_mount_home"` + SkipMountHome string `toml:"skip_mount_home,omitempty"` // Alternative program to use for the mount of the file system - MountProgram string `toml:"mount_program"` + MountProgram string `toml:"mount_program,omitempty"` // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt"` + MountOpt string `toml:"mountopt,omitempty"` // PullOptions specifies options to be handed to pull managers // This API is experimental and can be changed without bumping the major version number. - PullOptions map[string]string `toml:"pull_options"` + PullOptions map[string]string `toml:"pull_options,omitempty"` // DisableVolatile doesn't allow volatile mounts when it is set. - DisableVolatile bool `toml:"disable-volatile"` + DisableVolatile bool `toml:"disable-volatile,omitempty"` } // GetGraphDriverOptions returns the driver specific options diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000000..85c5e76c844 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,52 @@ +package homedir + +import ( + "errors" + "os" + "path/filepath" +) + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 06b53854b93..027db259c19 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -18,18 +18,3 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} - -// GetCacheHome is unsupported on non-linux system. -func GetCacheHome() (string, error) { - return "", errors.New("homedir.GetCacheHome() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 2475e351bb5..33177bdf306 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -93,48 +93,3 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index 4f2615ed32f..af65f2c03de 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -17,7 +17,12 @@ func Key() string { // environment variables depending on the target operating system. // Returned path should be used with "path/filepath" to form new paths. func Get() string { - return os.Getenv(Key()) + home := os.Getenv(Key()) + if home != "" { + return home + } + home, _ = os.UserHomeDir() + return home } // GetShortcutString returns the string that is shortcut to user's home directory diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 83bc8c34ff4..a19ba288b40 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -82,7 +82,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(uidMap) == 1 && uidMap[0].Size == 1 { uid = uidMap[0].HostID } else { - uid, err = toHost(0, uidMap) + uid, err = RawToHost(0, uidMap) if err != nil { return -1, -1, err } @@ -90,7 +90,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { if len(gidMap) == 1 && gidMap[0].Size == 1 { gid = gidMap[0].HostID } else { - gid, err = toHost(0, gidMap) + gid, err = RawToHost(0, gidMap) if err != nil { return -1, -1, err } @@ -98,10 +98,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { return uid, gid, nil } -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { +// RawToContainer takes an id mapping, and uses it to translate a host ID to +// the remapped ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. 
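To make the direction of the renamed helpers concrete, here is a minimal sketch (not part of the vendored change; the mapping values are invented):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Hypothetical single-entry mapping: container IDs 0..65535 correspond to
	// host IDs 100000..165535.
	m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	hostID, err := idtools.RawToHost(0, m) // -> 100000
	if err != nil {
		panic(err)
	}
	containerID, err := idtools.RawToContainer(100000, m) // -> 0
	if err != nil {
		panic(err)
	}
	fmt.Println(hostID, containerID)
}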
+func RawToContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -114,10 +118,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { +// RawToHost takes an id mapping and a remapped ID, and translates the ID to +// the mapped host ID. If no map is provided, then the translation assumes a +// 1-to-1 mapping and returns the passed in id. +// +// If you wish to map a (uid,gid) combination you should use the corresponding +// IDMappings methods, which ensure that you are mapping the correct ID against +// the correct mapping. +func RawToHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -185,28 +193,24 @@ func (i *IDMappings) RootPair() IDPair { // Remapping is only performed if the ids aren't already the remapped root ids func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { var err error - target := i.RootPair() + var target IDPair - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) - if err != nil { - return target, err - } + target.UID, err = RawToHost(pair.UID, i.uids) + if err != nil { + return target, err } - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) - } + target.GID, err = RawToHost(pair.GID, i.gids) return target, err } // ToContainer returns the container UID and GID for the host uid and gid func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) + uid, err := RawToContainer(pair.UID, i.uids) if err != nil { return -1, -1, err } - gid, err := toContainer(pair.GID, i.gids) + gid, err := RawToContainer(pair.GID, i.gids) return uid, gid, err } @@ -293,7 +297,7 @@ func parseSubidFile(path, username string) (ranges, error) { func checkChownErr(err error, name string, uid, gid int) error { if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL { - return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name) + return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate", uid, gid, name) } return err } diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go index db50a62e4c0..6e6e3b22bc9 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go @@ -12,11 +12,21 @@ import ( #cgo LDFLAGS: -l subid #include #include +#include const char *Prog = "storage"; +FILE *shadow_logfd = NULL; + struct subid_range get_range(struct subid_range *ranges, int i) { - return ranges[i]; + shadow_logfd = stderr; + return ranges[i]; } + +#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) +# define subid_get_uid_ranges get_subuid_ranges +# define subid_get_gid_ranges get_subgid_ranges +#endif + */ import "C" @@ -32,9 +42,9 @@ func readSubid(username string, isUser bool) (ranges, error) { var nRanges C.int var cRanges 
*C.struct_subid_range if isUser { - nRanges = C.get_subuid_ranges(cUsername, &cRanges) + nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) } else { - nRanges = C.get_subgid_ranges(cUsername, &cRanges) + nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) } if nRanges < 0 { return nil, errors.New("cannot read subids") diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 9776b2a1287..7f270c61f82 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -46,6 +46,9 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path + if !filepath.IsAbs(dirPath) { + return fmt.Errorf("path: %s should be absolute", dirPath) + } for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go index 372bee7321f..d3dd86d349f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go @@ -17,6 +17,7 @@ func Self() string { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // This will use the in-memory version (/proc/self/exe) of the current binary, // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go index 1ecaa906fed..9dd8cb9bbee 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go @@ -17,6 +17,7 @@ func Self() string { // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will // be set to "/usr/bin/docker". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -24,6 +25,7 @@ func Command(args ...string) *exec.Cmd { // CommandContext returns *exec.Cmd which has Path as current binary. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go index 9d937426854..5b3605f319c 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -9,10 +9,12 @@ import ( // Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. 
func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } // CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() return nil } diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go index 673ab476abd..d868564767f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go @@ -17,6 +17,7 @@ func Self() string { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func Command(args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.Command(Self()) cmd.Args = args return cmd @@ -26,6 +27,7 @@ func Command(args ...string) *exec.Cmd { // For example if current binary is "docker.exe" at "C:\", then cmd.Path will // be set to "C:\docker.exe". func CommandContext(ctx context.Context, args ...string) *exec.Cmd { + panicIfNotInitialized() cmd := exec.CommandContext(ctx, Self()) cmd.Args = args return cmd diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go index c56671d9192..a1938cd4f34 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go @@ -7,7 +7,10 @@ import ( "path/filepath" ) -var registeredInitializers = make(map[string]func()) +var ( + registeredInitializers = make(map[string]func()) + initWasCalled = false +) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { @@ -22,6 +25,7 @@ func Register(name string, initializer func()) { // initialization function was called. func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + initWasCalled = true if exists { initializer() @@ -30,6 +34,21 @@ func Init() bool { return false } +func panicIfNotInitialized() { + if !initWasCalled { + // The reexec package is used to run subroutines in + // subprocesses which would otherwise have unacceptable side + // effects on the main thread. If you found this error, then + // your program uses a package which needs to do this. In + // order for that to work, main() should start with this + // boilerplate, or an equivalent: + // if reexec.Init() { + // return + // } + panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") + } +} + func naiveSelf() string { name := os.Args[0] if filepath.Base(name) == name { diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go index 6d351ce80a9..c352efce0aa 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -9,6 +9,7 @@ import ( "io" "os" "os/exec" + "os/signal" "os/user" "runtime" "strconv" @@ -484,6 +485,30 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) { // Finish up. logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) + + // Forward SIGHUP, SIGINT, and SIGTERM to our child process. 
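The reexec.Init() requirement described above assumes callers follow the documented boilerplate; a minimal sketch of that pattern (the initializer name "example-helper" is invented for illustration):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Runs in the re-executed child when os.Args[0] matches the registered name.
	reexec.Register("example-helper", func() {
		fmt.Println("running inside the helper subprocess")
	})
}

func main() {
	// Without this call, reexec.Command()/CommandContext() now panic instead of
	// silently producing a broken command.
	if reexec.Init() {
		return
	}
	cmd := reexec.Command("example-helper")
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}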
+ interrupted := make(chan os.Signal, 100) + defer func() { + signal.Stop(interrupted) + close(interrupted) + }() + cmd.Hook = func(int) error { + go func() { + for receivedSignal := range interrupted { + cmd.Cmd.Process.Signal(receivedSignal) + } + }() + return nil + } + signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + // Make sure our child process gets SIGKILLed if we exit, for whatever + // reason, before it does. + if cmd.Cmd.SysProcAttr == nil { + cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL + ExecRunnable(cmd, nil) } @@ -501,11 +526,11 @@ func ExecRunnable(cmd Runnable, cleanup func()) { if exitError.ProcessState.Exited() { if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { if waitStatus.Exited() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(waitStatus.ExitStatus()) } if waitStatus.Signaled() { - logrus.Errorf("%v", exitError) + logrus.Debugf("%v", exitError) exit(int(waitStatus.Signal()) + 128) } } diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 722750c0cca..c17dd6d37ea 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -1,5 +1,14 @@ # This file is is the configuration file for all tools -# that use the containers/storage library. +# that use the containers/storage library. The storage.conf file +# overrides all other storage.conf files. Container engines using the +# container/storage library do not inherit fields from other storage.conf +# files. +# +# Note: The storage.conf file overrides other storage.conf files based on this precedence: +# /usr/containers/storage.conf +# /etc/containers/storage.conf +# $HOME/.config/containers/storage.conf +# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) # See man 5 containers-storage.conf for more information # The "container storage" table contains all of the server options. [storage] @@ -11,8 +20,14 @@ driver = "overlay" runroot = "/run/containers/storage" # Primary Read/Write location of container storage +# When changing the graphroot location on an SELINUX system, you must +# ensure the labeling matches the default locations labels with the +# following commands: +# semanage fcontext -a -e /var/lib/containers/storage /NEWSTORAGEPATH +# restorecon -R -v /NEWSTORAGEPATH graphroot = "/var/lib/containers/storage" + # Storage path for rootless users # # rootless_storage_path = "$HOME/.local/share/containers/storage" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 169c7d1513e..6b40b68cac0 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -31,6 +31,14 @@ import ( "github.com/pkg/errors" ) +type updateNameOperation int + +const ( + setNames updateNameOperation = iota + addNames + removeNames +) + var ( stores []*store storesLock sync.Mutex @@ -368,8 +376,17 @@ type Store interface { // SetNames changes the list of names for a layer, image, or container. // Duplicate names are removed from the list automatically. + // Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. SetNames(id string, names []string) error + // AddNames adds the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. 
+ AddNames(id string, names []string) error + + // RemoveNames removes the list of names for a layer, image, or container. + // Duplicate names are removed from the list automatically. + RemoveNames(id string, names []string) error + // ListImageBigData retrieves a list of the (possibly large) chunks of // named data associated with an image. ListImageBigData(id string) ([]string, error) @@ -575,10 +592,11 @@ type ContainerOptions struct { // container's layer will inherit settings from the image's top layer // or, if it is not being created based on an image, the Store object. types.IDMappingOptions - LabelOpts []string - Flags map[string]interface{} - MountOpts []string - Volatile bool + LabelOpts []string + Flags map[string]interface{} + MountOpts []string + Volatile bool + StorageOpt map[string]string } type store struct { @@ -646,17 +664,21 @@ func GetStore(options types.StoreOptions) (Store, error) { storesLock.Lock() defer storesLock.Unlock() + // return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first for _, s := range stores { - if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { return s, nil } } - if options.GraphRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified") - } - if options.RunRoot == "" { - return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified") + // if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither. + switch { + case options.RunRoot == "" && options.GraphRoot == "": + return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified") + case options.GraphRoot == "": + options.GraphRoot = types.Options().GraphRoot + case options.RunRoot == "": + options.RunRoot = types.Options().RunRoot } if err := os.MkdirAll(options.RunRoot, 0700); err != nil { @@ -1384,7 +1406,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags["MountLabel"] = mountLabel } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true) + clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true) if err != nil { return nil, err } @@ -1608,7 +1630,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) { } } if foundImage { - return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id) + return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id) } return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) } @@ -2045,7 +2067,20 @@ func dedupeNames(names []string) []string { return deduped } +// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`. 
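As a usage sketch for the new methods (assuming an already-initialized Store and an existing image ID; this is not code from the change itself), renaming an image without the SetNames read-modify-write race might look like:

package naming

import "github.com/containers/storage"

// renameImage swaps one name for another using the race-free AddNames and
// RemoveNames helpers instead of the deprecated SetNames; "s" and "id" are
// assumed to be valid.
func renameImage(s storage.Store, id, oldName, newName string) error {
	if err := s.AddNames(id, []string{newName}); err != nil {
		return err
	}
	return s.RemoveNames(id, []string{oldName})
}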
func (s *store) SetNames(id string, names []string) error { + return s.updateNames(id, names, setNames) +} + +func (s *store) AddNames(id string, names []string) error { + return s.updateNames(id, names, addNames) +} + +func (s *store) RemoveNames(id string, names []string) error { + return s.updateNames(id, names, removeNames) +} + +func (s *store) updateNames(id string, names []string, op updateNameOperation) error { deduped := dedupeNames(names) rlstore, err := s.LayerStore() @@ -2058,7 +2093,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rlstore.Exists(id) { - return rlstore.SetNames(id, deduped) + switch op { + case setNames: + return rlstore.SetNames(id, deduped) + case removeNames: + return rlstore.RemoveNames(id, deduped) + case addNames: + return rlstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } ristore, err := s.ImageStore() @@ -2071,7 +2115,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if ristore.Exists(id) { - return ristore.SetNames(id, deduped) + switch op { + case setNames: + return ristore.SetNames(id, deduped) + case removeNames: + return ristore.RemoveNames(id, deduped) + case addNames: + return ristore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } // Check is id refers to a RO Store @@ -2109,7 +2162,16 @@ func (s *store) SetNames(id string, names []string) error { return err } if rcstore.Exists(id) { - return rcstore.SetNames(id, deduped) + switch op { + case setNames: + return rcstore.SetNames(id, deduped) + case removeNames: + return rcstore.RemoveNames(id, deduped) + case addNames: + return rcstore.AddNames(id, deduped) + default: + return errInvalidUpdateNameOperation + } } return ErrLayerUnknown } @@ -2370,22 +2432,16 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err != nil { return nil, err } - childrenByParent := make(map[string]*[]string) + childrenByParent := make(map[string][]string) for _, layer := range layers { - parent := layer.Parent - if list, ok := childrenByParent[parent]; ok { - newList := append(*list, layer.ID) - childrenByParent[parent] = &newList - } else { - childrenByParent[parent] = &([]string{layer.ID}) - } + childrenByParent[layer.Parent] = append(childrenByParent[layer.Parent], layer.ID) } - otherImagesByTopLayer := make(map[string]string) + otherImagesTopLayers := make(map[string]struct{}) for _, img := range images { if img.ID != id { - otherImagesByTopLayer[img.TopLayer] = img.ID + otherImagesTopLayers[img.TopLayer] = struct{}{} for _, layerID := range img.MappedTopLayers { - otherImagesByTopLayer[layerID] = img.ID + otherImagesTopLayers[layerID] = struct{}{} } } } @@ -2395,43 +2451,46 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) } } layer := image.TopLayer - lastRemoved := "" + layersToRemoveMap := make(map[string]struct{}) for layer != "" { if rcstore.Exists(layer) { break } - if _, ok := otherImagesByTopLayer[layer]; ok { + if _, used := otherImagesTopLayers[layer]; used { break } parent := "" if l, err := rlstore.Get(layer); err == nil { parent = l.Parent } - hasOtherRefs := func() bool { + hasChildrenNotBeingRemoved := func() bool { layersToCheck := []string{layer} if layer == image.TopLayer { layersToCheck = append(layersToCheck, image.MappedTopLayers...) 
} for _, layer := range layersToCheck { - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - return true + if childList := childrenByParent[layer]; len(childList) > 0 { + for _, child := range childList { + if _, childIsSlatedForRemoval := layersToRemoveMap[child]; childIsSlatedForRemoval { + continue } + return true } } } return false } - if hasOtherRefs() { + if hasChildrenNotBeingRemoved() { break } - lastRemoved = layer if layer == image.TopLayer { layersToRemove = append(layersToRemove, image.MappedTopLayers...) + for _, mappedTopLayer := range image.MappedTopLayers { + layersToRemoveMap[mappedTopLayer] = struct{}{} + } } - layersToRemove = append(layersToRemove, lastRemoved) + layersToRemove = append(layersToRemove, layer) + layersToRemoveMap[layer] = struct{}{} layer = parent } } else { @@ -2499,23 +2558,29 @@ func (s *store) DeleteContainer(id string) error { gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) wg.Add(1) go func() { - var err error - for attempts := 0; attempts < 50; attempts++ { - err = os.RemoveAll(gcpath) - if err == nil || !system.IsEBUSY(err) { - break - } - time.Sleep(time.Millisecond * 100) + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(gcpath) + if err == nil { + errChan <- nil + return } - errChan <- err - wg.Done() + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(gcpath) }() rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) wg.Add(1) go func() { - errChan <- os.RemoveAll(rcpath) - wg.Done() + defer wg.Done() + // attempt a simple rm -rf first + err := os.RemoveAll(rcpath) + if err == nil { + errChan <- nil + return + } + // and if it fails get to the more complicated cleanup + errChan <- system.EnsureRemoveAll(rcpath) }() go func() { @@ -2524,17 +2589,12 @@ func (s *store) DeleteContainer(id string) error { }() var errors []error - for { - select { - case err, ok := <-errChan: - if !ok { - return multierror.Append(nil, errors...).ErrorOrNil() - } - if err != nil { - errors = append(errors, err) - } + for err := range errChan { + if err != nil { + errors = append(errors, err) } } + return multierror.Append(nil, errors...).ErrorOrNil() } } return ErrNotAContainer @@ -2830,10 +2890,33 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro if err != nil { return nil, err } + + // NaiveDiff could cause mounts to happen without a lock, so be safe + // and treat the .Diff operation as a Mount. + s.graphLock.Lock() + defer s.graphLock.Unlock() + + modified, err := s.graphLock.Modified() + if err != nil { + return nil, err + } + + // We need to make sure the home mount is present when the Mount is done. + if modified { + s.graphDriver = nil + s.layerStore = nil + s.graphDriver, err = s.getGraphDriver() + if err != nil { + return nil, err + } + s.lastLoaded = time.Now() + } + for _, s := range append([]ROLayerStore{lstore}, lstores...) 
{ store := s store.RLock() if err := store.ReloadIfChanged(); err != nil { + store.Unlock() return nil, err } if store.Exists(to) { diff --git a/vendor/github.com/containers/storage/types/errors.go b/vendor/github.com/containers/storage/types/errors.go index d920d12eb50..ad12ffdbf2d 100644 --- a/vendor/github.com/containers/storage/types/errors.go +++ b/vendor/github.com/containers/storage/types/errors.go @@ -55,4 +55,6 @@ var ( ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") // ErrNotSupported is returned when the requested functionality is not supported. ErrNotSupported = errors.New("not supported") + // ErrInvalidMappings is returned when the specified mappings are invalid. + ErrInvalidMappings = errors.New("invalid mappings specified") ) diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index f9bf7e6b658..a71c6d2efd8 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -3,44 +3,73 @@ package types import ( "fmt" "os" - "os/exec" "path/filepath" "strings" "sync" "time" "github.com/BurntSushi/toml" - "github.com/containers/storage/drivers/overlay" cfg "github.com/containers/storage/pkg/config" "github.com/containers/storage/pkg/idtools" "github.com/sirupsen/logrus" ) // TOML-friendly explicit tables used for conversions. -type tomlConfig struct { +type TomlConfig struct { Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - RootlessStoragePath string `toml:"rootless_storage_path"` - Options cfg.OptionsConfig `toml:"options"` + Driver string `toml:"driver,omitempty"` + RunRoot string `toml:"runroot,omitempty"` + GraphRoot string `toml:"graphroot,omitempty"` + RootlessStoragePath string `toml:"rootless_storage_path,omitempty"` + Options cfg.OptionsConfig `toml:"options,omitempty"` } `toml:"storage"` } +const ( + // these are default path for run and graph root for rootful users + // for rootless path is constructed via getRootlessStorageOpts + defaultRunRoot string = "/run/containers/storage" + defaultGraphRoot string = "/var/lib/containers/storage" +) + // defaultConfigFile path to the system wide storage.conf file var ( - defaultConfigFile = "/etc/containers/storage.conf" - defaultConfigFileSet = false + defaultConfigFile = "/usr/share/containers/storage.conf" + defaultOverrideConfigFile = "/etc/containers/storage.conf" + defaultConfigFileSet = false // DefaultStoreOptions is a reasonable default set of options. defaultStoreOptions StoreOptions ) +const ( + overlayDriver = "overlay" + overlay2 = "overlay2" +) + func init() { - defaultStoreOptions.RunRoot = "/run/containers/storage" - defaultStoreOptions.GraphRoot = "/var/lib/containers/storage" + defaultStoreOptions.RunRoot = defaultRunRoot + defaultStoreOptions.GraphRoot = defaultGraphRoot defaultStoreOptions.GraphDriverName = "" - ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + // The DefaultConfigFile(rootless) function returns the path + // of the used storage.conf file, by returning defaultConfigFile + // If override exists containers/storage uses it by default. 
+ defaultConfigFile = defaultOverrideConfigFile + ReloadConfigurationFileIfNeeded(defaultOverrideConfigFile, &defaultStoreOptions) + } else { + if !os.IsNotExist(err) { + logrus.Warningf("Attempting to use %s, %v", defaultConfigFile, err) + } + ReloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions) + } + // reload could set values to empty for run and graph root if config does not contains anything + if defaultStoreOptions.RunRoot == "" { + defaultStoreOptions.RunRoot = defaultRunRoot + } + if defaultStoreOptions.GraphRoot == "" { + defaultStoreOptions.GraphRoot = defaultGraphRoot + } } // defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. @@ -168,7 +197,6 @@ func isRootlessDriver(driver string) bool { // getRootlessStorageOpts returns the storage opts for containers running as non root func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - const overlayDriver = "overlay" dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) if err != nil { @@ -190,25 +218,16 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti if driver := os.Getenv("STORAGE_DRIVER"); driver != "" { opts.GraphDriverName = driver } - if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver { - supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime) - if err != nil { - return opts, err - } - if supported { - opts.GraphDriverName = overlayDriver - } else { - if path, err := exec.LookPath("fuse-overlayfs"); err == nil { - opts.GraphDriverName = overlayDriver - opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)} - } - } - if opts.GraphDriverName == overlayDriver { - for _, o := range systemOpts.GraphDriverOptions { - if strings.Contains(o, "ignore_chown_errors") { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) - break - } + if opts.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + opts.GraphDriverName = overlayDriver + } + + if opts.GraphDriverName == overlayDriver { + for _, o := range systemOpts.GraphDriverOptions { + if strings.Contains(o, "ignore_chown_errors") { + opts.GraphDriverOptions = append(opts.GraphDriverOptions, o) + break } } } @@ -271,7 +290,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio // ReloadConfigurationFile parses the specified configuration file and overrides // the configuration in storeOptions. 
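Since TomlConfig is now exported, a caller can decode a configuration file directly; a small sketch follows (the path is chosen per the precedence noted earlier and may need adjusting):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/containers/storage/types"
)

func main() {
	// Decode an on-disk storage.conf into the exported TomlConfig struct.
	var conf types.TomlConfig
	if _, err := toml.DecodeFile("/etc/containers/storage.conf", &conf); err != nil {
		panic(err)
	}
	fmt.Println(conf.Storage.Driver, conf.Storage.GraphRoot, conf.Storage.RunRoot)
}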
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { - config := new(tomlConfig) + config := new(TomlConfig) meta, err := toml.DecodeFile(configFile, &config) if err == nil { @@ -286,7 +305,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { } } - // Clear storeOptions of previos settings + // Clear storeOptions of previous settings *storeOptions = StoreOptions{} if config.Storage.Driver != "" { storeOptions.GraphDriverName = config.Storage.Driver @@ -295,6 +314,10 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { config.Storage.Driver = os.Getenv("STORAGE_DRIVER") storeOptions.GraphDriverName = config.Storage.Driver } + if storeOptions.GraphDriverName == overlay2 { + logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") + storeOptions.GraphDriverName = overlayDriver + } if storeOptions.GraphDriverName == "" { logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) } @@ -385,3 +408,39 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { func Options() StoreOptions { return defaultStoreOptions } + +// Save overwrites the tomlConfig in storage.conf with the given conf +func Save(conf TomlConfig, rootless bool) error { + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return err + } + + if err = os.Remove(configFile); !os.IsNotExist(err) && err != nil { + return err + } + + f, err := os.Create(configFile) + if err != nil { + return err + } + + return toml.NewEncoder(f).Encode(conf) +} + +// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it +func StorageConfig(rootless bool) (*TomlConfig, error) { + config := new(TomlConfig) + + configFile, err := DefaultConfigFile(rootless) + if err != nil { + return nil, err + } + + _, err = toml.DecodeFile(configFile, &config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 80d56041b06..cec377f26a9 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -40,3 +40,35 @@ func validateMountOptions(mountOptions []string) error { } return nil } + +func applyNameOperation(oldNames []string, opParameters []string, op updateNameOperation) ([]string, error) { + result := make([]string, 0) + switch op { + case setNames: + // ignore all old names and just return new names + return dedupeNames(opParameters), nil + case removeNames: + // remove given names from old names + for _, name := range oldNames { + // only keep names in final result which do not intersect with input names + // basically `result = oldNames - opParameters` + nameShouldBeRemoved := false + for _, opName := range opParameters { + if name == opName { + nameShouldBeRemoved = true + } + } + if !nameShouldBeRemoved { + result = append(result, name) + } + } + return dedupeNames(result), nil + case addNames: + result = append(result, opParameters...) + result = append(result, oldNames...) 
+ return dedupeNames(result), nil + default: + return result, errInvalidUpdateNameOperation + } + return dedupeNames(result), nil +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 0668a66cf70..be2b3436062 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -15,7 +15,7 @@ type roffRenderer struct { extensions blackfriday.Extensions listCounters []int firstHeader bool - defineTerm bool + firstDD bool listDepth int } @@ -42,7 +42,8 @@ const ( quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" tableStart = "\n.TS\nallbox;\n" tableEnd = ".TE\n" tableCellStart = "T{\n" @@ -90,7 +91,7 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering switch node.Type { case blackfriday.Text: - r.handleText(w, node, entering) + escapeSpecialChars(w, node.Literal) case blackfriday.Softbreak: out(w, crTag) case blackfriday.Hardbreak: @@ -150,40 +151,21 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, codeCloseTag) case blackfriday.Table: r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) case blackfriday.TableHead: case blackfriday.TableBody: case blackfriday.TableRow: // no action as cell entries do all the nroff formatting return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags default: fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) } return walkAction } -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { if entering { switch node.Level { @@ -230,15 +212,20 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering if node.ListFlags&blackfriday.ListTypeOrdered != 0 { out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { + // DT (definition term): line just before DD (see below). + out(w, dtTag) + r.firstDD = true } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true + // DD (definition description): line that starts with ": ". + // + // We have to distinguish between the first DD and the + // subsequent ones, as there should be no vertical + // whitespace between the DT and the first DD. 
+ if r.firstDD { + r.firstDD = false } else { - r.defineTerm = false + out(w, dd2Tag) } } else { out(w, ".IP \\(bu 2\n") @@ -251,7 +238,7 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { if entering { out(w, tableStart) - //call walker to count cells (and rows?) so format section can be produced + // call walker to count cells (and rows?) so format section can be produced columns := countColumns(node) out(w, strings.Repeat("l ", columns)+"\n") out(w, strings.Repeat("l ", columns)+".\n") @@ -261,28 +248,41 @@ func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering } func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } if entering { + var start string if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) + start = "\t" + } + if node.IsHeader { + start += codespanTag + } else if nodeLiteralSize(node) > 30 { + start += tableCellStart } + out(w, start) } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag + var end string + if node.IsHeader { + end = codespanCloseTag + } else if nodeLiteralSize(node) > 30 { + end = tableCellEnd + } + if node.Next == nil && end != tableCellEnd { + // Last cell: need to carriage return if we are at the end of the + // header row and content isn't wrapped in a "tablecell" + end += crTag } out(w, end) } } +func nodeLiteralSize(node *blackfriday.Node) int { + total := 0 + for n := node.FirstChild; n != nil; n = n.FirstChild { + total += len(n.Literal) + } + return total +} + // because roff format requires knowing the column count before outputting any table // data we need to walk a table tree and count the columns func countColumns(node *blackfriday.Node) int { @@ -309,15 +309,6 @@ func out(w io.Writer, output string) { io.WriteString(w, output) // nolint: errcheck } -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - func escapeSpecialChars(w io.Writer, text []byte) { for i := 0; i < len(text); i++ { // escape initial apostrophe or period @@ -328,7 +319,7 @@ func escapeSpecialChars(w io.Writer, text []byte) { // directly copy normal characters org := i - for i < len(text) && !needsBackslash(text[i]) { + for i < len(text) && text[i] != '\\' { i++ } if i > org { diff --git a/vendor/github.com/docker/distribution/.golangci.yml b/vendor/github.com/docker/distribution/.golangci.yml new file mode 100644 index 00000000000..1ba6cb91623 --- /dev/null +++ b/vendor/github.com/docker/distribution/.golangci.yml @@ -0,0 +1,20 @@ +linters: + enable: + - structcheck + - varcheck + - staticcheck + - unconvert + - gofmt + - goimports + - golint + - ineffassign + - vet + - unused + - misspell + disable: + - errcheck + +run: + deadline: 2m + skip-dirs: + - vendor diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json deleted file mode 100644 index 9df5b14bcb2..00000000000 --- a/vendor/github.com/docker/distribution/.gometalinter.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "Vendor": true, - "Deadline": "2m", - "Sort": ["linter", "severity", "path", "line"], - "EnableGC": true, - "Enable": [ - "structcheck", - 
"staticcheck", - "unconvert", - - "gofmt", - "goimports", - "golint", - "vet" - ] -} diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index 0f48321d497..8f3738f3d0e 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -30,3 +30,20 @@ Helen Xie Helen-xie Mike Brown Mike Brown Manish Tomar Manish Tomar Sakeven Jiang sakeven +Milos Gajdos Milos Gajdos +Derek McGowan Derek McGowa +Adrian Plata Adrian Plata <@users.noreply.github.com> +Sebastiaan van Stijn Sebastiaan van Stijn +Vishesh Jindal Vishesh Jindal +Wang Yan Wang Yan +Chris Patterson Chris Patterson +Eohyung Lee Eohyung Lee +João Pereira <484633+joaodrp@users.noreply.github.com> +Smasherr Smasherr +Thomas Berger Thomas Berger +Samuel Karp Samuel Karp +Justin Cormack +sayboras +CrazyMax +CrazyMax <1951866+crazy-max@users.noreply.github.com> +CrazyMax diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml deleted file mode 100644 index 44ced60451d..00000000000 --- a/vendor/github.com/docker/distribution/.travis.yml +++ /dev/null @@ -1,51 +0,0 @@ -dist: trusty -sudo: required -# setup travis so that we can run containers for integration tests -services: - - docker - -language: go - -go: - - "1.11.x" - -go_import_path: github.com/docker/distribution - -addons: - apt: - packages: - - python-minimal - - -env: - - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 - -before_install: - - uname -r - - sudo apt-get -q update - -install: - - go get -u github.com/vbatts/git-validation - # TODO: Add enforcement of license - # - go get -u github.com/kunalkushwaha/ltag - - cd $TRAVIS_BUILD_DIR - -script: - - export GOOS=$TRAVIS_GOOS - - export CGO_ENABLED=$TRAVIS_CGO_ENABLED - - DCO_VERBOSITY=-q script/validate/dco - - GOOS=linux script/setup/install-dev-tools - - script/validate/vendor - - go build -i . - - make check - - make build - - make binaries - # Currently takes too long - #- if [ "$GOOS" = "linux" ]; then make test-race ; fi - - if [ "$GOOS" = "linux" ]; then make coverage ; fi - -after_success: - - bash <(curl -s https://codecov.io/bash) -F linux - -before_deploy: - # Run tests with storage driver configurations diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index 9537817ca3a..ae8c040c735 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,22 +1,49 @@ -FROM golang:1.11-alpine AS build +# syntax=docker/dockerfile:1.3 -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV BUILDTAGS include_oss include_gcs +ARG GO_VERSION=1.16.15 +ARG GORELEASER_XX_VERSION=1.2.5 -ARG GOOS=linux -ARG GOARCH=amd64 -ARG GOARM=6 +FROM --platform=$BUILDPLATFORM crazymax/goreleaser-xx:${GORELEASER_XX_VERSION} AS goreleaser-xx +FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base +COPY --from=goreleaser-xx / / +RUN apk add --no-cache file git +WORKDIR /go/src/github.com/docker/distribution -RUN set -ex \ - && apk add --no-cache make git file +FROM base AS build +ENV GO111MODULE=auto +ENV CGO_ENABLED=0 +# GIT_REF is used by goreleaser-xx to handle the proper git ref when available. +# It will fallback to the working tree info if empty and use "git tag --points-at" +# or "git describe" to define the version info. 
+ARG GIT_REF +ARG TARGETPLATFORM +ARG PKG="github.com/distribution/distribution" +ARG BUILDTAGS="include_oss include_gcs" +RUN --mount=type=bind,rw \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=target=/go/pkg/mod,type=cache \ + goreleaser-xx --debug \ + --name="registry" \ + --dist="/out" \ + --main="./cmd/registry" \ + --flags="-v" \ + --ldflags="-s -w -X '$PKG/version.Version={{.Version}}' -X '$PKG/version.Revision={{.Commit}}' -X '$PKG/version.Package=$PKG'" \ + --tags="$BUILDTAGS" \ + --files="LICENSE" \ + --files="README.md" -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" +FROM scratch AS artifact +COPY --from=build /out/*.tar.gz / +COPY --from=build /out/*.zip / +COPY --from=build /out/*.sha256 / -FROM alpine +FROM scratch AS binary +COPY --from=build /usr/local/bin/registry* / + +FROM alpine:3.14 +RUN apk add --no-cache ca-certificates COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml -COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry +COPY --from=build /usr/local/bin/registry /bin/registry VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 4635c6eca87..331da27328d 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -50,7 +50,7 @@ version/version.go: check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") @echo "$(WHALE) $@" - gometalinter --config .gometalinter.json ./... + golangci-lint run test: ## run tests, except integration test with test.short @echo "$(WHALE) $@" diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index 998878850cd..e513c18e969 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -2,7 +2,7 @@ The Docker toolset to pack, ship, store, and deliver content. -This repository's main product is the Docker Registry 2.0 implementation +This repository provides the Docker Registry 2.0 implementation for storing and distributing Docker images. It supersedes the [docker/docker-registry](https://github.com/docker/docker-registry) project with a new API design, focused around security and performance. 
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index c0e9261be93..2a659eaa368 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -10,7 +10,7 @@ import ( "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) var ( diff --git a/vendor/github.com/docker/distribution/docker-bake.hcl b/vendor/github.com/docker/distribution/docker-bake.hcl new file mode 100644 index 00000000000..4dd5a100c1d --- /dev/null +++ b/vendor/github.com/docker/distribution/docker-bake.hcl @@ -0,0 +1,65 @@ +// GITHUB_REF is the actual ref that triggers the workflow +// https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables +variable "GITHUB_REF" { + default = "" +} + +target "_common" { + args = { + GIT_REF = GITHUB_REF + } +} + +group "default" { + targets = ["image-local"] +} + +// Special target: https://github.com/docker/metadata-action#bake-definition +target "docker-metadata-action" { + tags = ["registry:local"] +} + +target "binary" { + inherits = ["_common"] + target = "binary" + output = ["./bin"] +} + +target "artifact" { + inherits = ["_common"] + target = "artifact" + output = ["./bin"] +} + +target "artifact-all" { + inherits = ["artifact"] + platforms = [ + "linux/amd64", + "linux/arm/v6", + "linux/arm/v7", + "linux/arm64", + "linux/ppc64le", + "linux/s390x" + ] +} + +target "image" { + inherits = ["_common", "docker-metadata-action"] +} + +target "image-local" { + inherits = ["image"] + output = ["type=docker"] +} + +target "image-all" { + inherits = ["image"] + platforms = [ + "linux/amd64", + "linux/arm/v6", + "linux/arm/v7", + "linux/arm64", + "linux/ppc64le", + "linux/s390x" + ] +} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 1816baea1d6..8f84a220a97 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) { // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) -var mappings = make(map[string]UnmarshalFunc, 0) +var mappings = make(map[string]UnmarshalFunc) // UnmarshalManifest looks up manifest unmarshal functions based on // MediaType diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go index 2d71fc5e9ff..b3dfb7a6d7e 100644 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. 
+func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87a3..8c0c23b2fe1 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go index 6d9bb4b62af..4c35b879afd 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) { for _, daErr := range errs { var err Error - switch daErr.(type) { + switch daErr := daErr.(type) { case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) + err = daErr.WithDetail(nil) case Error: - err = daErr.(Error) + err = daErr default: err = ErrorCodeUnknown.WithDetail(daErr) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 1337bdb1276..3c3ec989330 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { u.RawQuery = merged.Encode() return u } - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. 
-func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go index 6e3f1ccc410..fe238210cd7 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -117,8 +117,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar && !isCtl && !isSeparator { diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index aa442e65406..3e2ae66d3cf 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -16,7 +16,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" + v2 "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" @@ -736,7 +736,12 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO return nil, err } - resp, err := bs.client.Post(u, "", nil) + req, err := http.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + resp, err := bs.client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf index a249caf269d..bd1b4bff61c 100644 --- a/vendor/github.com/docker/distribution/vendor.conf +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -8,7 +8,7 @@ github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 @@ -48,4 +48,4 @@ gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 v2.2.1 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 +github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 diff --git a/vendor/github.com/docker/docker/api/swagger.yaml 
b/vendor/github.com/docker/docker/api/swagger.yaml index bada4a8e3c8..b6bca4cef11 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5755,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -8607,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index bb7e4e33695..296c96a6334 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -8,11 +8,6 @@ import ( "strings" ) -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - // PortBinding represents a binding between a Host IP address and a Host Port type PortBinding struct { // HostIP is the host IP Address @@ -158,30 +153,33 @@ type PortMapping struct { func splitParts(rawport string) (string, string, string) { parts := strings.Split(rawport, ":") n := len(parts) - containerport := parts[n-1] + containerPort := parts[n-1] switch n { case 1: - return "", "", containerport + return "", "", containerPort case 2: - return "", parts[0], containerport + return "", parts[0], containerPort case 3: - return parts[0], parts[1], containerport + return parts[0], parts[1], containerPort default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort } } // ParsePortSpec parses a port specification string into a slice of PortMappings func ParsePortSpec(rawPort string) ([]PortMapping, error) { var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) + ip, hostPort, containerPort := splitParts(rawPort) proto, containerPort = SplitProtoPort(containerPort) - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + if ip != "" && ip[0] == '[' { + // Strip [] from IPV6 addresses + rawIP, _, err := net.SplitHostPort(ip + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + } + ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { return nil, fmt.Errorf("Invalid ip address: %s", ip) diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go index ce950171e31..b6eed145e1c 100644 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -43,7 +43,7 @@ type portMapSorter []portMapEntry func (s portMapSorter) Len() int { return len(s) } func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// sort the port so that the order is: +// Less sorts the port so that the order is: // 1. port with larger specified bindings // 2. larger port // 3. 
port with tcp protocol @@ -58,7 +58,7 @@ func (s portMapSorter) Less(i, j int) bool { func SortPortMap(ports []Port, bindings PortMap) { s := portMapSorter{} for _, p := range ports { - if binding, ok := bindings[p]; ok { + if binding, ok := bindings[p]; ok && len(binding) > 0 { for _, b := range binding { s = append(s, portMapEntry{port: p, binding: b}) } diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go index 98e9a1dc61b..c897cb02ade 100644 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -2,11 +2,8 @@ package sockets import ( "net" - "net/url" "os" "strings" - - "golang.org/x/net/proxy" ) // GetProxyEnv allows access to the uppercase and the lowercase forms of @@ -20,32 +17,12 @@ func GetProxyEnv(key string) string { return proxyValue } -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil +// DialerFromEnvironment was previously used to configure a net.Dialer to route +// connections through a SOCKS proxy. +// DEPRECATED: SOCKS proxies are now supported by configuring only +// http.Transport.Proxy, and no longer require changing http.Transport.Dial. +// Therefore, only sockets.ConfigureTransport() needs to be called, and any +// sockets.DialerFromEnvironment() calls can be dropped. +func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { + return direct, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go index a1d7beb4d80..2e9e9006f61 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -3,14 +3,9 @@ package sockets import ( "errors" - "net" "net/http" - "time" ) -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - // ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. 
var ErrProtocolNotAvailable = errors.New("protocol not available") @@ -26,13 +21,6 @@ func ConfigureTransport(tr *http.Transport, proto, addr string) error { return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 386cf0dbbde..10d76342652 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,6 +3,7 @@ package sockets import ( + "context" "fmt" "net" "net/http" @@ -10,7 +11,10 @@ import ( "time" ) -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +const ( + defaultTimeout = 10 * time.Second + maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) +) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { @@ -18,8 +22,11 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { } // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) + dialer := &net.Dialer{ + Timeout: defaultTimeout, + } + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, proto, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go index 5c21644e1fe..7acafc5a2ad 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -1,6 +1,7 @@ package sockets import ( + "context" "net" "net/http" "time" @@ -15,8 +16,8 @@ func configureUnixTransport(tr *http.Transport, proto, addr string) error { func configureNpipeTransport(tr *http.Transport, proto, addr string) error { // No need for compression in local communications. tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return winio.DialPipeContext(ctx, addr) } return nil } diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index a8b5dbb6fdc..e7591e6edbf 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,5 +1,51 @@ // +build !windows +/* +Package sockets is a simple unix domain socket wrapper. 
+ +Usage + +For example: + + import( + "fmt" + "net" + "os" + "github.com/docker/go-connections/sockets" + ) + + func main() { + l, err := sockets.NewUnixSocketWithOpts("/path/to/sockets", + sockets.WithChown(0,0),sockets.WithChmod(0660)) + if err != nil { + panic(err) + } + echoStr := "hello" + + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte(echoStr)) + conn.Close() + } + }() + + conn, err := net.Dial("unix", path) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 5) + if _, err := conn.Read(buf); err != nil { + panic(err) + } else if string(buf) != echoStr { + panic(fmt.Errorf("Msg may lost")) + } + } +*/ package sockets import ( @@ -8,25 +54,73 @@ import ( "syscall" ) -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { +// SockOption sets up socket file's creating option +type SockOption func(string) error + +// WithChown modifies the socket file's uid and gid +func WithChown(uid, gid int) SockOption { + return func(path string) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + return nil + } +} + +// WithChmod modifies socket file's access mode. +func WithChmod(mask os.FileMode) SockOption { + return func(path string) error { + if err := os.Chmod(path, mask); err != nil { + return err + } + return nil + } +} + +// NewUnixSocketWithOpts creates a unix socket with the specified options. +// By default, socket permissions are 0000 (i.e.: no access for anyone); pass +// WithChmod() and WithChown() to set the desired ownership and permissions. +// +// This function temporarily changes the system's "umask" to 0777 to work around +// a race condition between creating the socket and setting its permissions. While +// this should only be for a short duration, it may affect other processes that +// create files/directories during that period. +func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) + // net.Listen does not allow for permissions to be set. As a result, when + // specifying custom permissions ("WithChmod()"), there is a short time + // between creating the socket and applying the permissions, during which + // the socket permissions are Less restrictive than desired. + // + // To work around this limitation of net.Listen(), we temporarily set the + // umask to 0777, which forces the socket to be created with 000 permissions + // (i.e.: no access for anyone). After that, WithChmod() must be used to set + // the desired permissions. + // + // We don't use "defer" here, to reset the umask to its original value as soon + // as possible. Ideally we'd be able to detect if WithChmod() was passed as + // an option, and skip changing umask if default permissions are used. + origUmask := syscall.Umask(0777) l, err := net.Listen("unix", path) + syscall.Umask(origUmask) if err != nil { return nil, err } - if err := os.Chown(path, 0, gid); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err + + for _, op := range opts { + if err := op(path); err != nil { + _ = l.Close() + return nil, err + } } + return l, nil } + +// NewUnixSocket creates a unix socket with the specified path and group. 
+func NewUnixSocket(path string, gid int) (net.Listener, error) { + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 0ef3fdcb469..992968373eb 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -53,18 +53,9 @@ var acceptedCBCCiphers = []uint16{ // known weak algorithms removed. var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, @@ -72,25 +63,25 @@ func ServerDefault(ops ...func(*tls.Config)) *tls.Config { } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ + tlsConfig := &tls.Config{ // Prefer TLS1.2 as the client minimum MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, } for _, op := range ops { - op(tlsconfig) + op(tlsConfig) } - return tlsconfig + return tlsConfig } // certPool returns an X.509 certificate pool from `caFile`, the certificate file. 
@@ -108,11 +99,11 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pem, err := ioutil.ReadFile(caFile) + pemData, err := ioutil.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pem) { + if !certPool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } return certPool, nil @@ -141,7 +132,7 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key +// password when trying to decrypt a TLS private key func IsErrEncryptedKey(err error) bool { return errors.Cause(err) == x509.IncorrectPasswordError } @@ -157,8 +148,8 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { } var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go new file mode 100644 index 00000000000..d8215f8e78a --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go @@ -0,0 +1,16 @@ +// +build go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go new file mode 100644 index 00000000000..a5ba7f4a388 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go @@ -0,0 +1,15 @@ +// +build !go1.13 + +package tlsconfig + +import ( + "crypto/tls" +) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} diff --git a/vendor/github.com/bits-and-blooms/bitset/.gitignore b/vendor/github.com/docker/go-events/.gitignore similarity index 97% rename from vendor/github.com/bits-and-blooms/bitset/.gitignore rename to vendor/github.com/docker/go-events/.gitignore index 5c204d28b0e..daf913b1b34 100644 --- a/vendor/github.com/bits-and-blooms/bitset/.gitignore +++ b/vendor/github.com/docker/go-events/.gitignore @@ -22,5 +22,3 @@ _testmain.go *.exe *.test *.prof - -target diff --git a/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 00000000000..d813af779b0 --- /dev/null +++ b/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! 
Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 00000000000..6d630cf595e --- /dev/null +++ b/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 00000000000..e414d82e964 --- /dev/null +++ b/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "aluzzardi", + "lk4d4", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md new file mode 100644 index 00000000000..0acafc279a3 --- /dev/null +++ b/vendor/github.com/docker/go-events/README.md @@ -0,0 +1,117 @@ +# Docker Events Package + +[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) +[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) + +The Docker `events` package implements a composable event distribution package +for Go. + +Originally created to implement the [notifications in Docker Registry +2](https://github.com/docker/distribution/blob/master/docs/notifications.md), +we've found the pattern to be useful in other applications. This package is +most of the same code with slightly updated interfaces. Much of the internals +have been made available. + +## Usage + +The `events` package centers around a `Sink` type. Events are written with +calls to `Sink.Write(event Event)`. Sinks can be wired up in various +configurations to achieve interesting behavior. + +The canonical example is that employed by the +[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) +package. Let's say we have a type `httpSink` where we'd like to queue +notifications. As a rule, it should send a single http request and return an +error if it fails: + +```go +func (h *httpSink) Write(event Event) error { + p, err := json.Marshal(event) + if err != nil { + return err + } + body := bytes.NewReader(p) + resp, err := h.client.Post(h.url, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.Status != 200 { + return errors.New("unexpected status") + } + + return nil +} + +// implement (*httpSink).Close() +``` + +With just that, we can start using components from this package. One can call +`(*httpSink).Write` to send events as the body of a post request to a +configured URL. + +### Retries + +HTTP can be unreliable. 
The first feature we'd like is to have some retry: + +```go +hs := newHTTPSink(/*...*/) +retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) +``` + +We now have a sink that will retry events against the `httpSink` until they +succeed. The retry will backoff for one second after 5 consecutive failures +using the breaker strategy. + +### Queues + +This isn't quite enough. We we want a sink that doesn't block while we are +waiting for events to be sent. Let's add a `Queue`: + +```go +queue := NewQueue(retry) +``` + +Now, we have an unbounded queue that will work through all events sent with +`(*Queue).Write`. Events can be added asynchronously to the queue without +blocking the current execution path. This is ideal for use in an http request. + +### Broadcast + +It usually turns out that you want to send to more than one listener. We can +use `Broadcaster` to support this: + +```go +var broadcast = NewBroadcaster() // make it available somewhere in your application. +broadcast.Add(queue) // add your queue! +broadcast.Add(queue2) // and another! +``` + +With the above, we can now call `broadcast.Write` in our http handlers and have +all the events distributed to each queue. Because the events are queued, not +listener blocks another. + +### Extending + +For the most part, the above is sufficient for a lot of applications. However, +extending the above functionality can be done implementing your own `Sink`. The +behavior and semantics of the sink can be completely dependent on the +application requirements. The interface is provided below for reference: + +```go +type Sink { + Write(Event) error + Close() error +} +``` + +Application behavior can be controlled by how `Write` behaves. The examples +above are designed to queue the message and return as quickly as possible. +Other implementations may block until the event is committed to durable +storage. + +## Copyright and license + +Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, +Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go new file mode 100644 index 00000000000..5120078dfb8 --- /dev/null +++ b/vendor/github.com/docker/go-events/broadcast.go @@ -0,0 +1,178 @@ +package events + +import ( + "fmt" + "sync" + + "github.com/sirupsen/logrus" +) + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan Event + adds chan configureRequest + removes chan configureRequest + + shutdown chan struct{} + closed chan struct{} + once sync.Once +} + +// NewBroadcaster appends one or more sinks to the list of sinks. The +// broadcaster behavior will be affected by the properties of the sink. +// Generally, the sink should accept all messages and deal with reliability on +// its own. Use of EventQueue and RetryingSink should be used here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan Event), + adds: make(chan configureRequest), + removes: make(chan configureRequest), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts an event to be dispatched to all sinks. This method will never +// fail and should never block (hopefully!). 
The caller cedes the memory to the +// broadcaster and should not modify it after calling write. +func (b *Broadcaster) Write(event Event) error { + select { + case b.events <- event: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Add the sink to the broadcaster. +// +// The provided sink must be comparable with equality. Typically, this just +// works with a regular pointer type. +func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink. +func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. + + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 00000000000..802cf51ffec --- /dev/null +++ b/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. 
+type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 00000000000..56db7c25103 --- /dev/null +++ b/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go new file mode 100644 index 00000000000..f0f1d9ea5ff --- /dev/null +++ b/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. If an error is + // received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 00000000000..e6c0eb69dd5 --- /dev/null +++ b/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that will send to events to dst that return +// true for Matcher. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. 
+func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close. + if f.closed { + return nil + } + + f.closed = true + return f.dst.Close() +} diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go new file mode 100644 index 00000000000..4bb770afc2e --- /dev/null +++ b/vendor/github.com/docker/go-events/queue.go @@ -0,0 +1,111 @@ +package events + +import ( + "container/list" + "sync" + + "github.com/sirupsen/logrus" +) + +// Queue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type Queue struct { + dst Sink + events *list.List + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// NewQueue returns a queue to the provided Sink dst. +func NewQueue(dst Sink) *Queue { + eq := Queue{ + dst: dst, + events: list.New(), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *Queue) Write(event Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shutsdown the event queue, flushing +func (eq *Queue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *Queue) run() { + for { + event := eq.next() + + if event == nil { + return // nil block means event queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate. + // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *Queue) next() Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.(Event) + eq.events.Remove(front) + + return block +} diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go new file mode 100644 index 00000000000..b7f0a542252 --- /dev/null +++ b/vendor/github.com/docker/go-events/retry.go @@ -0,0 +1,260 @@ +package events + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" +) + +// RetryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Retry is configured with a RetryStrategy. 
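Queue decouples producers from a slow or unreliable destination: Write only appends to an in-memory list and signals the condition variable, while a dedicated goroutine drains the list into the sink. A rough sketch, with a hypothetical slowSink standing in for the destination:

```go
package main

import (
	"fmt"
	"time"

	events "github.com/docker/go-events"
)

// slowSink simulates a destination that takes a while per event.
type slowSink struct{ received chan events.Event }

func (s *slowSink) Write(e events.Event) error {
	time.Sleep(50 * time.Millisecond)
	s.received <- e
	return nil
}

func (s *slowSink) Close() error { return nil }

func main() {
	dst := &slowSink{received: make(chan events.Event, 3)}

	// The queue accepts writes immediately and drains them to dst from its
	// own goroutine, so producers never wait on the slow sink.
	q := events.NewQueue(dst)

	start := time.Now()
	for i := 0; i < 3; i++ {
		_ = q.Write(fmt.Sprintf("event-%d", i))
	}
	fmt.Println("enqueued in", time.Since(start)) // far less than 150ms

	for i := 0; i < 3; i++ {
		fmt.Println("delivered:", <-dst.received)
	}
	_ = q.Close()
}
```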
Concurrent calls to a +// retrying sink are serialized through the sink, meaning that if one is +// in-flight, another will not proceed. +type RetryingSink struct { + sink Sink + strategy RetryStrategy + closed chan struct{} + once sync.Once +} + +// NewRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { + rs := &RetryingSink{ + sink: sink, + strategy: strategy, + closed: make(chan struct{}), + } + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *RetryingSink) Write(event Event) error { + logger := logrus.WithField("event", event) + +retry: + select { + case <-rs.closed: + return ErrSinkClosed + default: + } + + if backoff := rs.strategy.Proceed(event); backoff > 0 { + select { + case <-time.After(backoff): + // TODO(stevvooe): This branch holds up the next try. Before, we + // would simply break to the "retry" label and then possibly wait + // again. However, this requires all retry strategies to have a + // large probability of probing the sync for success, rather than + // just backing off and sending the request. + case <-rs.closed: + return ErrSinkClosed + } + } + + if err := rs.sink.Write(event); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logger := logger.WithError(err) // shadow!! + + if rs.strategy.Failure(event, err) { + logger.Errorf("retryingsink: dropped event") + return nil + } + + logger.Errorf("retryingsink: error writing event, retrying") + goto retry + } + + rs.strategy.Success(event) + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *RetryingSink) Close() error { + rs.once.Do(func() { + close(rs.closed) + }) + + return nil +} + +func (rs *RetryingSink) String() string { + // Serialize a copy of the RetryingSink without the sync.Once, to avoid + // a data race. + rs2 := map[string]interface{}{ + "sink": rs.sink, + "strategy": rs.strategy, + "closed": rs.closed, + } + return fmt.Sprint(rs2) +} + +// RetryStrategy defines a strategy for retrying event sink writes. +// +// All methods should be goroutine safe. +type RetryStrategy interface { + // Proceed is called before every event send. If proceed returns a + // positive, non-zero integer, the retryer will back off by the provided + // duration. + // + // An event is provided, by may be ignored. + Proceed(event Event) time.Duration + + // Failure reports a failure to the strategy. If this method returns true, + // the event should be dropped. + Failure(event Event, err error) bool + + // Success should be called when an event is sent successfully. + Success(event Event) +} + +// Breaker implements a circuit breaker retry strategy. +// +// The current implementation never drops events. +type Breaker struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + mu sync.Mutex +} + +var _ RetryStrategy = &Breaker{} + +// NewBreaker returns a breaker that will backoff after the threshold has been +// tripped. A Breaker is thread safe and may be shared by many goroutines. +func NewBreaker(threshold int, backoff time.Duration) *Breaker { + return &Breaker{ + threshold: threshold, + backoff: backoff, + } +} + +// Proceed checks the failures against the threshold. 
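A retrying sink wraps any destination and keeps re-driving Write according to the strategy; the Breaker strategy backs off only after a run of consecutive failures and never drops events. A minimal sketch with a hypothetical flaky destination:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	events "github.com/docker/go-events"
)

// flakySink fails a fixed number of times before accepting writes.
type flakySink struct{ failures int }

func (s *flakySink) Write(e events.Event) error {
	if s.failures > 0 {
		s.failures--
		return errors.New("temporarily unavailable")
	}
	fmt.Println("committed:", e)
	return nil
}

func (s *flakySink) Close() error { return nil }

func main() {
	dst := &flakySink{failures: 2}

	// Back off for 10ms only once three consecutive failures have been seen;
	// the breaker never asks for the event to be dropped, so Write keeps
	// retrying until the destination accepts it.
	strategy := events.NewBreaker(3, 10*time.Millisecond)
	rs := events.NewRetryingSink(dst, strategy)

	if err := rs.Write("charge-card"); err != nil {
		panic(err)
	}
	_ = rs.Close()
}
```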
+func (b *Breaker) Proceed(event Event) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + + if b.recent < b.threshold { + return 0 + } + + return b.last.Add(b.backoff).Sub(time.Now()) +} + +// Success resets the breaker. +func (b *Breaker) Success(event Event) { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent = 0 + b.last = time.Time{} +} + +// Failure records the failure and latest failure time. +func (b *Breaker) Failure(event Event, err error) bool { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent++ + b.last = time.Now().UTC() + return false // never drop events. +} + +var ( + // DefaultExponentialBackoffConfig provides a default configuration for + // exponential backoff. + DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ + Base: time.Second, + Factor: time.Second, + Max: 20 * time.Second, + } +) + +// ExponentialBackoffConfig configures backoff parameters. +// +// Note that these parameters operate on the upper bound for choosing a random +// value. For example, at Base=1s, a random value in [0,1s) will be chosen for +// the backoff value. +type ExponentialBackoffConfig struct { + // Base is the minimum bound for backing off after failure. + Base time.Duration + + // Factor sets the amount of time by which the backoff grows with each + // failure. + Factor time.Duration + + // Max is the absolute maxiumum bound for a single backoff. + Max time.Duration +} + +// ExponentialBackoff implements random backoff with exponentially increasing +// bounds as the number consecutive failures increase. +type ExponentialBackoff struct { + failures uint64 // consecutive failure counter (needs to be 64-bit aligned) + config ExponentialBackoffConfig +} + +// NewExponentialBackoff returns an exponential backoff strategy with the +// desired config. If config is nil, the default is returned. +func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { + return &ExponentialBackoff{ + config: config, + } +} + +// Proceed returns the next randomly bound exponential backoff time. +func (b *ExponentialBackoff) Proceed(event Event) time.Duration { + return b.backoff(atomic.LoadUint64(&b.failures)) +} + +// Success resets the failures counter. +func (b *ExponentialBackoff) Success(event Event) { + atomic.StoreUint64(&b.failures, 0) +} + +// Failure increments the failure counter. +func (b *ExponentialBackoff) Failure(event Event, err error) bool { + atomic.AddUint64(&b.failures, 1) + return false +} + +// backoff calculates the amount of time to wait based on the number of +// consecutive failures. +func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { + if failures <= 0 { + // proceed normally when there are no failures. + return 0 + } + + factor := b.config.Factor + if factor <= 0 { + factor = DefaultExponentialBackoffConfig.Factor + } + + backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) + + max := b.config.Max + if max <= 0 { + max = DefaultExponentialBackoffConfig.Max + } + + if backoff > max || backoff < 0 { + backoff = max + } + + // Choose a uniformly distributed value from [0, backoff). + return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 42d9abc07ed..5152bf59bf8 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -1,20 +1,11 @@ -# Archived project. No maintenance. - -This project is not maintained anymore and is archived. Feel free to fork and -make your own changes if needed. 
For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/) - -Thanks to everyone for their valuable feedback and contributions. - - -# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) Color lets you use colorized outputs in terms of [ANSI Escape Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It has support for Windows too! The API can be used in several ways, pick one that suits you. - -![Color](https://i.imgur.com/c1JI0lA.png) +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) ## Install @@ -87,7 +78,7 @@ notice("Don't forget this...") ### Custom fprint functions (FprintFunc) ```go -blue := color.New(FgBlue).FprintfFunc() +blue := color.New(color.FgBlue).FprintfFunc() blue(myWriter, "important notice: %s", stars) // Mix up with multiple attributes @@ -136,14 +127,16 @@ fmt.Println("All text will now be bold magenta.") There might be a case where you want to explicitly disable/enable color output. the `go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`) +(for example if the output were piped directly to `less`). -`Color` has support to disable/enable colors both globally and for single color -definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You -can easily disable the color output with: +The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment +variable is set (regardless of its value). -```go +`Color` has support to disable/enable colors programatically both globally and +for single color definitions. For example suppose you have a CLI app and a +`--no-color` bool flag. You can easily disable the color output with: +```go var flagNoColor = flag.Bool("no-color", false, "Disable color output") if *flagNoColor { @@ -165,6 +158,10 @@ c.EnableColor() c.Println("This prints again cyan...") ``` +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + ## Todo * Save/Return previous values @@ -179,4 +176,3 @@ c.Println("This prints again cyan...") ## License The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details - diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 91c8e9f0620..98a60f3c88d 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -15,9 +15,11 @@ import ( var ( // NoColor defines if the output is colorized or not. It's dynamically set to // false or true based on the stdout's file descriptor referring to a terminal - // or not. This is a global option and affects all colors. For more control - // over each color block use the methods DisableColor() individually. - NoColor = os.Getenv("TERM") == "dumb" || + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). 
This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) // Output defines the standard output of the print functions. By default @@ -33,6 +35,12 @@ var ( colorsCacheMu sync.Mutex // protects colorsCache ) +// noColorExists returns true if the environment variable NO_COLOR exists. +func noColorExists() bool { + _, exists := os.LookupEnv("NO_COLOR") + return exists +} + // Color defines a custom color object which is defined by SGR parameters. type Color struct { params []Attribute @@ -108,7 +116,14 @@ const ( // New returns a newly created color object. func New(value ...Attribute) *Color { - c := &Color{params: make([]Attribute, 0)} + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorExists() { + c.noColor = boolPtr(true) + } + c.Add(value...) return c } @@ -387,7 +402,7 @@ func (c *Color) EnableColor() { } func (c *Color) isNoColorSet() bool { - // check first if we have user setted action + // check first if we have user set action if c.noColor != nil { return *c.noColor } diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index cf1e96500f4..04541de786f 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -118,6 +118,8 @@ the color output with: color.NoColor = true // disables colorized output } +You can also disable the color by setting the NO_COLOR environment variable to any value. + It also has support for single color definitions (local). You can disable/enable color output on the fly: diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml index d5b0fe128a7..9921997daff 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -5,4 +5,4 @@ linters: disable-all: true enable: - gofumpt - - gci + - goimports diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE index 707a0ed49b2..20837167a9a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/LICENSE +++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013-2021, go-dockerclient authors +Copyright (c) go-dockerclient authors All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile index 431458441da..2f5d9fcc6a9 100644 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -7,12 +7,12 @@ test: pretest gotest .PHONY: golangci-lint golangci-lint: - cd /tmp && GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@latest + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest golangci-lint run .PHONY: staticcheck staticcheck: - cd /tmp && GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@master + go install honnef.co/go/tools/cmd/staticcheck@master staticcheck ./... 
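The net effect of the NO_COLOR support is that colors created while the variable is present start out disabled, while CI systems that do understand ANSI escapes can still force output back on. A short sketch using the package's standard Println/Sprint helpers:

```go
package main

import (
	"fmt"
	"os"

	"github.com/fatih/color"
)

func main() {
	// Any Color built while NO_COLOR exists starts out disabled:
	// New checks the variable (regardless of its value) at construction time.
	os.Setenv("NO_COLOR", "1")

	warn := color.New(color.FgYellow, color.Bold)
	warn.Println("plain text, because NO_COLOR is set")

	// Colors can still be forced programmatically, e.g. on CI systems such as
	// GitHub Actions that accept ANSI escapes but are not a TTY.
	color.NoColor = false
	warn.EnableColor()
	fmt.Println(warn.Sprint("yellow and bold again"))
}
```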
.PHONY: lint diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go index cd2034304be..e9f13f3948d 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !windows // +build !windows package docker diff --git a/vendor/github.com/go-git/go-billy/v5/README.md b/vendor/github.com/go-git/go-billy/v5/README.md index ca58b1c8ae1..da5c074782c 100644 --- a/vendor/github.com/go-git/go-billy/v5/README.md +++ b/vendor/github.com/go-git/go-billy/v5/README.md @@ -1,4 +1,4 @@ -# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest) +# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy/v5) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest) The missing interface filesystem abstraction for Go. Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations. diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go new file mode 100644 index 00000000000..f217693e6f0 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go @@ -0,0 +1,410 @@ +// Package memfs provides a billy filesystem base on memory. +package memfs // import "github.com/go-git/go-billy/v5/memfs" + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" + "github.com/go-git/go-billy/v5/util" +) + +const separator = filepath.Separator + +// Memory a very convenient filesystem based on memory files +type Memory struct { + s *storage + + tempCount int +} + +//New returns a new Memory filesystem. 
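memfs exposes the regular billy.Filesystem surface backed entirely by memory, with parent directories created implicitly by the storage layer. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	// An in-memory filesystem; nothing below touches the real disk.
	fs := memfs.New()

	f, err := fs.Create("/notes/todo.txt")
	if err != nil {
		panic(err)
	}
	// The parent directory "/notes" is created implicitly by the storage layer.
	if _, err := f.Write([]byte("ship the release\n")); err != nil {
		panic(err)
	}
	f.Close()

	g, err := fs.Open("/notes/todo.txt")
	if err != nil {
		panic(err)
	}
	defer g.Close()

	data, err := ioutil.ReadAll(g)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data) // "ship the release"
}
```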
+func New() billy.Filesystem { + fs := &Memory{s: newStorage()} + return chroot.New(fs, string(separator)) +} + +func (fs *Memory) Create(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +func (fs *Memory) Open(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDONLY, 0) +} + +func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + f, has := fs.s.Get(filename) + if !has { + if !isCreate(flag) { + return nil, os.ErrNotExist + } + + var err error + f, err = fs.s.New(filename, perm, flag) + if err != nil { + return nil, err + } + } else { + if isExclusive(flag) { + return nil, os.ErrExist + } + + if target, isLink := fs.resolveLink(filename, f); isLink { + return fs.OpenFile(target, flag, perm) + } + } + + if f.mode.IsDir() { + return nil, fmt.Errorf("cannot open directory: %s", filename) + } + + return f.Duplicate(filename, perm, flag), nil +} + +var errNotLink = errors.New("not a link") + +func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) { + if !isSymlink(f.mode) { + return fullpath, false + } + + target = string(f.content.bytes) + if !isAbs(target) { + target = fs.Join(filepath.Dir(fullpath), target) + } + + return target, true +} + +// On Windows OS, IsAbs validates if a path is valid based on if stars with a +// unit (eg.: `C:\`) to assert that is absolute, but in this mem implementation +// any path starting by `separator` is also considered absolute. +func isAbs(path string) bool { + return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator)) +} + +func (fs *Memory) Stat(filename string) (os.FileInfo, error) { + f, has := fs.s.Get(filename) + if !has { + return nil, os.ErrNotExist + } + + fi, _ := f.Stat() + + var err error + if target, isLink := fs.resolveLink(filename, f); isLink { + fi, err = fs.Stat(target) + if err != nil { + return nil, err + } + } + + // the name of the file should always the name of the stated file, so we + // overwrite the Stat returned from the storage with it, since the + // filename may belong to a link. 
+ fi.(*fileInfo).name = filepath.Base(filename) + return fi, nil +} + +func (fs *Memory) Lstat(filename string) (os.FileInfo, error) { + f, has := fs.s.Get(filename) + if !has { + return nil, os.ErrNotExist + } + + return f.Stat() +} + +type ByName []os.FileInfo + +func (a ByName) Len() int { return len(a) } +func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } +func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) { + if f, has := fs.s.Get(path); has { + if target, isLink := fs.resolveLink(path, f); isLink { + return fs.ReadDir(target) + } + } + + var entries []os.FileInfo + for _, f := range fs.s.Children(path) { + fi, _ := f.Stat() + entries = append(entries, fi) + } + + sort.Sort(ByName(entries)) + + return entries, nil +} + +func (fs *Memory) MkdirAll(path string, perm os.FileMode) error { + _, err := fs.s.New(path, perm|os.ModeDir, 0) + return err +} + +func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) { + return util.TempFile(fs, dir, prefix) +} + +func (fs *Memory) getTempFilename(dir, prefix string) string { + fs.tempCount++ + filename := fmt.Sprintf("%s_%d_%d", prefix, fs.tempCount, time.Now().UnixNano()) + return fs.Join(dir, filename) +} + +func (fs *Memory) Rename(from, to string) error { + return fs.s.Rename(from, to) +} + +func (fs *Memory) Remove(filename string) error { + return fs.s.Remove(filename) +} + +func (fs *Memory) Join(elem ...string) string { + return filepath.Join(elem...) +} + +func (fs *Memory) Symlink(target, link string) error { + _, err := fs.Stat(link) + if err == nil { + return os.ErrExist + } + + if !os.IsNotExist(err) { + return err + } + + return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink) +} + +func (fs *Memory) Readlink(link string) (string, error) { + f, has := fs.s.Get(link) + if !has { + return "", os.ErrNotExist + } + + if !isSymlink(f.mode) { + return "", &os.PathError{ + Op: "readlink", + Path: link, + Err: fmt.Errorf("not a symlink"), + } + } + + return string(f.content.bytes), nil +} + +// Capabilities implements the Capable interface. 
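Stat follows symlinks to their target (patching only the returned name), while Lstat and Readlink report on the link itself. A small sketch, seeding the file with util.WriteFile, the same helper memfs uses internally for links:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()

	if err := util.WriteFile(fs, "/data/current.txt", []byte("v2"), 0644); err != nil {
		panic(err)
	}
	if err := fs.Symlink("/data/current.txt", "/data/latest"); err != nil {
		panic(err)
	}

	// Lstat describes the link itself, Stat follows it to the target.
	li, _ := fs.Lstat("/data/latest")
	si, _ := fs.Stat("/data/latest")
	target, _ := fs.Readlink("/data/latest")

	fmt.Println(li.Mode()&os.ModeSymlink != 0, si.Size(), target)
	// true 2 /data/current.txt
}
```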
+func (fs *Memory) Capabilities() billy.Capability { + return billy.WriteCapability | + billy.ReadCapability | + billy.ReadAndWriteCapability | + billy.SeekCapability | + billy.TruncateCapability +} + +type file struct { + name string + content *content + position int64 + flag int + mode os.FileMode + + isClosed bool +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) Read(b []byte) (int, error) { + n, err := f.ReadAt(b, f.position) + f.position += int64(n) + + if err == io.EOF && n != 0 { + err = nil + } + + return n, err +} + +func (f *file) ReadAt(b []byte, off int64) (int, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) { + return 0, errors.New("read not supported") + } + + n, err := f.content.ReadAt(b, off) + + return n, err +} + +func (f *file) Seek(offset int64, whence int) (int64, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + switch whence { + case io.SeekCurrent: + f.position += offset + case io.SeekStart: + f.position = offset + case io.SeekEnd: + f.position = int64(f.content.Len()) + offset + } + + return f.position, nil +} + +func (f *file) Write(p []byte) (int, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) { + return 0, errors.New("write not supported") + } + + n, err := f.content.WriteAt(p, f.position) + f.position += int64(n) + + return n, err +} + +func (f *file) Close() error { + if f.isClosed { + return os.ErrClosed + } + + f.isClosed = true + return nil +} + +func (f *file) Truncate(size int64) error { + if size < int64(len(f.content.bytes)) { + f.content.bytes = f.content.bytes[:size] + } else if more := int(size) - len(f.content.bytes); more > 0 { + f.content.bytes = append(f.content.bytes, make([]byte, more)...) + } + + return nil +} + +func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File { + new := &file{ + name: filename, + content: f.content, + mode: mode, + flag: flag, + } + + if isAppend(flag) { + new.position = int64(new.content.Len()) + } + + if isTruncate(flag) { + new.content.Truncate() + } + + return new +} + +func (f *file) Stat() (os.FileInfo, error) { + return &fileInfo{ + name: f.Name(), + mode: f.mode, + size: f.content.Len(), + }, nil +} + +// Lock is a no-op in memfs. +func (f *file) Lock() error { + return nil +} + +// Unlock is a no-op in memfs. 
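Duplicate is where the open flags take effect: O_APPEND positions the new handle at the end of the shared content and O_TRUNC clears it, while OpenFile itself enforces O_EXCL. A sketch of the append and exclusive-create paths:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = util.WriteFile(fs, "log.txt", []byte("one\n"), 0644)

	// O_APPEND places the handle at the end of the shared content, so the
	// second line lands after the first instead of overwriting it.
	f, err := fs.OpenFile("log.txt", os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	f.Write([]byte("two\n"))
	f.Close()

	g, _ := fs.Open("log.txt")
	data, _ := ioutil.ReadAll(g)
	g.Close()
	fmt.Printf("%s", data) // "one\ntwo\n"

	// O_EXCL with O_CREATE refuses to clobber an existing file.
	if _, err := fs.OpenFile("log.txt", os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644); os.IsExist(err) {
		fmt.Println("already exists")
	}
}
```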
+func (f *file) Unlock() error { + return nil +} + +type fileInfo struct { + name string + size int + mode os.FileMode +} + +func (fi *fileInfo) Name() string { + return fi.name +} + +func (fi *fileInfo) Size() int64 { + return int64(fi.size) +} + +func (fi *fileInfo) Mode() os.FileMode { + return fi.mode +} + +func (*fileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi *fileInfo) IsDir() bool { + return fi.mode.IsDir() +} + +func (*fileInfo) Sys() interface{} { + return nil +} + +func (c *content) Truncate() { + c.bytes = make([]byte, 0) +} + +func (c *content) Len() int { + return len(c.bytes) +} + +func isCreate(flag int) bool { + return flag&os.O_CREATE != 0 +} + +func isExclusive(flag int) bool { + return flag&os.O_EXCL != 0 +} + +func isAppend(flag int) bool { + return flag&os.O_APPEND != 0 +} + +func isTruncate(flag int) bool { + return flag&os.O_TRUNC != 0 +} + +func isReadAndWrite(flag int) bool { + return flag&os.O_RDWR != 0 +} + +func isReadOnly(flag int) bool { + return flag == os.O_RDONLY +} + +func isWriteOnly(flag int) bool { + return flag&os.O_WRONLY != 0 +} + +func isSymlink(m os.FileMode) bool { + return m&os.ModeSymlink != 0 +} diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go new file mode 100644 index 00000000000..d3ff5a25db6 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go @@ -0,0 +1,229 @@ +package memfs + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" +) + +type storage struct { + files map[string]*file + children map[string]map[string]*file +} + +func newStorage() *storage { + return &storage{ + files: make(map[string]*file, 0), + children: make(map[string]map[string]*file, 0), + } +} + +func (s *storage) Has(path string) bool { + path = clean(path) + + _, ok := s.files[path] + return ok +} + +func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) { + path = clean(path) + if s.Has(path) { + if !s.MustGet(path).mode.IsDir() { + return nil, fmt.Errorf("file already exists %q", path) + } + + return nil, nil + } + + name := filepath.Base(path) + + f := &file{ + name: name, + content: &content{name: name}, + mode: mode, + flag: flag, + } + + s.files[path] = f + s.createParent(path, mode, f) + return f, nil +} + +func (s *storage) createParent(path string, mode os.FileMode, f *file) error { + base := filepath.Dir(path) + base = clean(base) + if f.Name() == string(separator) { + return nil + } + + if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil { + return err + } + + if _, ok := s.children[base]; !ok { + s.children[base] = make(map[string]*file, 0) + } + + s.children[base][f.Name()] = f + return nil +} + +func (s *storage) Children(path string) []*file { + path = clean(path) + + l := make([]*file, 0) + for _, f := range s.children[path] { + l = append(l, f) + } + + return l +} + +func (s *storage) MustGet(path string) *file { + f, ok := s.Get(path) + if !ok { + panic(fmt.Errorf("couldn't find %q", path)) + } + + return f +} + +func (s *storage) Get(path string) (*file, bool) { + path = clean(path) + if !s.Has(path) { + return nil, false + } + + file, ok := s.files[path] + return file, ok +} + +func (s *storage) Rename(from, to string) error { + from = clean(from) + to = clean(to) + + if !s.Has(from) { + return os.ErrNotExist + } + + move := [][2]string{{from, to}} + + for pathFrom := range s.files { + if pathFrom == from || !filepath.HasPrefix(pathFrom, from) { + continue + } + + rel, _ := filepath.Rel(from, 
pathFrom) + pathTo := filepath.Join(to, rel) + + move = append(move, [2]string{pathFrom, pathTo}) + } + + for _, ops := range move { + from := ops[0] + to := ops[1] + + if err := s.move(from, to); err != nil { + return err + } + } + + return nil +} + +func (s *storage) move(from, to string) error { + s.files[to] = s.files[from] + s.files[to].name = filepath.Base(to) + s.children[to] = s.children[from] + + defer func() { + delete(s.children, from) + delete(s.files, from) + delete(s.children[filepath.Dir(from)], filepath.Base(from)) + }() + + return s.createParent(to, 0644, s.files[to]) +} + +func (s *storage) Remove(path string) error { + path = clean(path) + + f, has := s.Get(path) + if !has { + return os.ErrNotExist + } + + if f.mode.IsDir() && len(s.children[path]) != 0 { + return fmt.Errorf("dir: %s contains files", path) + } + + base, file := filepath.Split(path) + base = filepath.Clean(base) + + delete(s.children[base], file) + delete(s.files, path) + return nil +} + +func clean(path string) string { + return filepath.Clean(filepath.FromSlash(path)) +} + +type content struct { + name string + bytes []byte +} + +func (c *content) WriteAt(p []byte, off int64) (int, error) { + if off < 0 { + return 0, &os.PathError{ + Op: "writeat", + Path: c.name, + Err: errors.New("negative offset"), + } + } + + prev := len(c.bytes) + + diff := int(off) - prev + if diff > 0 { + c.bytes = append(c.bytes, make([]byte, diff)...) + } + + c.bytes = append(c.bytes[:off], p...) + if len(c.bytes) < prev { + c.bytes = c.bytes[:prev] + } + + return len(p), nil +} + +func (c *content) ReadAt(b []byte, off int64) (n int, err error) { + if off < 0 { + return 0, &os.PathError{ + Op: "readat", + Path: c.name, + Err: errors.New("negative offset"), + } + } + + size := int64(len(c.bytes)) + if off >= size { + return 0, io.EOF + } + + l := int64(len(b)) + if off+l > size { + l = size - off + } + + btr := c.bytes[off : off+l] + if len(btr) < len(b) { + err = io.EOF + } + n = copy(b, btr) + + return +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/vendor/github.com/go-git/go-billy/v5/osfs/os.go index 880389fff2f..9665d2755d3 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os.go @@ -1,3 +1,5 @@ +// +build !js + // Package osfs provides a billy filesystem for the OS. package osfs // import "github.com/go-git/go-billy/v5/osfs" @@ -16,12 +18,15 @@ const ( defaultCreateMode = 0666 ) +// Default Filesystem representing the root of the os filesystem. +var Default = &OS{} + // OS is a filesystem based on the os filesystem. type OS struct{} // New returns a new OS filesystem. func New(baseDir string) billy.Filesystem { - return chroot.New(&OS{}, baseDir) + return chroot.New(Default, baseDir) } func (fs *OS) Create(filename string) (billy.File, error) { diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go new file mode 100644 index 00000000000..8ae68fed680 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go @@ -0,0 +1,21 @@ +// +build js + +package osfs + +import ( + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" + "github.com/go-git/go-billy/v5/memfs" +) + +// globalMemFs is the global memory fs +var globalMemFs = memfs.New() + +// Default Filesystem representing the root of in-memory filesystem for a +// js/wasm environment. +var Default = memfs.New() + +// New returns a new OS filesystem. 
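content.WriteAt zero-fills any gap between the current length and the write offset, so seeking past EOF and then writing produces a sparse-looking file rather than an error. A minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()

	f, _ := fs.Create("sparse.bin")
	// Seeking past the end and writing zero-fills the gap: WriteAt extends the
	// backing slice with zeros before copying the new bytes in.
	f.Seek(4, io.SeekStart)
	f.Write([]byte{0xFF})
	f.Close()

	g, _ := fs.Open("sparse.bin")
	data, _ := ioutil.ReadAll(g)
	g.Close()

	fmt.Println(len(data), data) // 5 [0 0 0 0 255]
}
```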
+func New(baseDir string) billy.Filesystem { + return chroot.New(Default, Default.Join("/", baseDir)) +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go index fe1eb85df41..e8f519ffe22 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go @@ -1,3 +1,5 @@ +// +build plan9 + package osfs import ( diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go index 7645dd52e69..c74d60ee6b5 100644 --- a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go @@ -1,4 +1,4 @@ -// +build !plan9,!windows +// +build !plan9,!windows,!js package osfs diff --git a/vendor/github.com/go-git/go-billy/v5/util/util.go b/vendor/github.com/go-git/go-billy/v5/util/util.go index 34c1d9e7494..5c77128c3ca 100644 --- a/vendor/github.com/go-git/go-billy/v5/util/util.go +++ b/vendor/github.com/go-git/go-billy/v5/util/util.go @@ -146,9 +146,8 @@ func nextSuffix() string { // to remove the file when no longer needed. func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { // This implementation is based on stdlib ioutil.TempFile. - if dir == "" { - dir = os.TempDir() + dir = getTempDir(fs) } nconflict := 0 @@ -179,7 +178,7 @@ func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { // This implementation is based on stdlib ioutil.TempDir if dir == "" { - dir = os.TempDir() + dir = getTempDir(fs.(billy.Basic)) } nconflict := 0 @@ -207,6 +206,15 @@ func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { return } +func getTempDir(fs billy.Basic) string { + ch, ok := fs.(billy.Chroot) + if !ok || ch.Root() == "" || ch.Root() == "/" || ch.Root() == string(filepath.Separator) { + return os.TempDir() + } + + return ".tmp" +} + type underlying interface { Underlying() billy.Basic } @@ -222,3 +230,53 @@ func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) { return u.Underlying(), path } + +// ReadFile reads the named file and returns the contents from the given filesystem. +// A successful call returns err == nil, not err == EOF. +// Because ReadFile reads the whole file, it does not treat an EOF from Read +// as an error to be reported. +func ReadFile(fs billy.Basic, name string) ([]byte, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + + defer f.Close() + + var size int + if info, err := fs.Stat(name); err == nil { + size64 := info.Size() + if int64(int(size64)) == size64 { + size = int(size64) + } + } + + size++ // one byte for final read at EOF + // If a file claims a small size, read at least 512 bytes. + // In particular, files in Linux's /proc claim size 0 but + // then do not work right if read in small pieces, + // so an initial read of 1 byte would not work correctly. 
+ + if size < 512 { + size = 512 + } + + data := make([]byte, 0, size) + for { + if len(data) >= cap(data) { + d := append(data[:cap(data)], 0) + data = d[:len(data)] + } + + n, err := f.Read(data[len(data):cap(data)]) + data = data[:len(data)+n] + + if err != nil { + if err == io.EOF { + err = nil + } + + return data, err + } + } +} diff --git a/vendor/github.com/go-git/go-git/v5/common.go b/vendor/github.com/go-git/go-git/v5/common.go index f837a2654c1..6174339a815 100644 --- a/vendor/github.com/go-git/go-git/v5/common.go +++ b/vendor/github.com/go-git/go-git/v5/common.go @@ -2,8 +2,6 @@ package git import "strings" -const defaultDotGitPath = ".git" - // countLines returns the number of lines in a string à la git, this is // The newline character is assumed to be '\n'. The empty string // contains 0 lines. If the last line of the string doesn't end with a diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go index 7d6ab5886b1..1aee25a4c5a 100644 --- a/vendor/github.com/go-git/go-git/v5/config/config.go +++ b/vendor/github.com/go-git/go-git/v5/config/config.go @@ -12,6 +12,7 @@ import ( "sort" "strconv" + "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-git/v5/internal/url" format "github.com/go-git/go-git/v5/plumbing/format/config" "github.com/mitchellh/go-homedir" @@ -89,6 +90,13 @@ type Config struct { Window uint } + Init struct { + // DefaultBranch Allows overriding the default branch name + // e.g. when initializing a new repository or when cloning + // an empty repository. + DefaultBranch string + } + // Remotes list of repository remotes, the key of the map is the name // of the remote, should equal to RemoteConfig.Name. Remotes map[string]*RemoteConfig @@ -98,6 +106,9 @@ type Config struct { // Branches list of branches, the key is the branch name and should // equal Branch.Name Branches map[string]*Branch + // URLs list of url rewrite rules, if repo url starts with URL.InsteadOf value, it will be replaced with the + // key instead. + URLs map[string]*URL // Raw contains the raw information of a config file. The main goal is // preserve the parsed information from the original format, to avoid // dropping unsupported fields. @@ -110,6 +121,7 @@ func NewConfig() *Config { Remotes: make(map[string]*RemoteConfig), Submodules: make(map[string]*Submodule), Branches: make(map[string]*Branch), + URLs: make(map[string]*URL), Raw: format.New(), } @@ -147,7 +159,7 @@ func LoadConfig(scope Scope) (*Config, error) { } for _, file := range files { - f, err := os.Open(file) + f, err := osfs.Default.Open(file) if err != nil { if os.IsNotExist(err) { continue @@ -223,6 +235,8 @@ const ( userSection = "user" authorSection = "author" committerSection = "committer" + initSection = "init" + urlSection = "url" fetchKey = "fetch" urlKey = "url" bareKey = "bare" @@ -233,6 +247,7 @@ const ( rebaseKey = "rebase" nameKey = "name" emailKey = "email" + defaultBranchKey = "defaultBranch" // DefaultPackWindow holds the number of previous objects used to // generate deltas. The value 10 is the same used by git command. 
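With the new Init and URLs fields, a parsed config exposes init.defaultBranch directly and rewrites remote URLs through the longest matching insteadOf rule, keeping the originals for marshalling back out. A sketch with a hypothetical raw config:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	raw := []byte(`
[init]
	defaultBranch = main
[url "git@github.com:"]
	insteadOf = https://github.com/
[remote "origin"]
	url = https://github.com/cri-o/cri-o
	fetch = +refs/heads/*:refs/remotes/origin/*
`)

	cfg := config.NewConfig()
	if err := cfg.Unmarshal(raw); err != nil {
		panic(err)
	}

	// The longest matching insteadOf prefix is applied to the remote URL,
	// while the pre-rewrite value is remembered for Marshal.
	fmt.Println(cfg.Init.DefaultBranch)        // main
	fmt.Println(cfg.Remotes["origin"].URLs[0]) // git@github.com:cri-o/cri-o
}
```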
@@ -251,6 +266,7 @@ func (c *Config) Unmarshal(b []byte) error { c.unmarshalCore() c.unmarshalUser() + c.unmarshalInit() if err := c.unmarshalPack(); err != nil { return err } @@ -260,6 +276,10 @@ func (c *Config) Unmarshal(b []byte) error { return err } + if err := c.unmarshalURLs(); err != nil { + return err + } + return c.unmarshalRemotes() } @@ -313,6 +333,25 @@ func (c *Config) unmarshalRemotes() error { c.Remotes[r.Name] = r } + // Apply insteadOf url rules + for _, r := range c.Remotes { + r.applyURLRules(c.URLs) + } + + return nil +} + +func (c *Config) unmarshalURLs() error { + s := c.Raw.Section(urlSection) + for _, sub := range s.Subsections { + r := &URL{} + if err := r.unmarshal(sub); err != nil { + return err + } + + c.URLs[r.Name] = r + } + return nil } @@ -344,6 +383,11 @@ func (c *Config) unmarshalBranches() error { return nil } +func (c *Config) unmarshalInit() { + s := c.Raw.Section(initSection) + c.Init.DefaultBranch = s.Options.Get(defaultBranchKey) +} + // Marshal returns Config encoded as a git-config file. func (c *Config) Marshal() ([]byte, error) { c.marshalCore() @@ -352,6 +396,8 @@ func (c *Config) Marshal() ([]byte, error) { c.marshalRemotes() c.marshalSubmodules() c.marshalBranches() + c.marshalURLs() + c.marshalInit() buf := bytes.NewBuffer(nil) if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { @@ -475,6 +521,27 @@ func (c *Config) marshalBranches() { s.Subsections = newSubsections } +func (c *Config) marshalURLs() { + s := c.Raw.Section(urlSection) + s.Subsections = make(format.Subsections, len(c.URLs)) + + var i int + for _, r := range c.URLs { + section := r.marshal() + // the submodule section at config is a subset of the .gitmodule file + // we should remove the non-valid options for the config file. + s.Subsections[i] = section + i++ + } +} + +func (c *Config) marshalInit() { + s := c.Raw.Section(initSection) + if c.Init.DefaultBranch != "" { + s.SetOption(defaultBranchKey, c.Init.DefaultBranch) + } +} + // RemoteConfig contains the configuration for a given remote repository. type RemoteConfig struct { // Name of the remote @@ -482,6 +549,12 @@ type RemoteConfig struct { // URLs the URLs of a remote repository. It must be non-empty. Fetch will // always use the first URL, while push will use all of them. URLs []string + + // insteadOfRulesApplied have urls been modified + insteadOfRulesApplied bool + // originalURLs are the urls before applying insteadOf rules + originalURLs []string + // Fetch the default set of "refspec" for fetch operation Fetch []RefSpec @@ -542,7 +615,12 @@ func (c *RemoteConfig) marshal() *format.Subsection { if len(c.URLs) == 0 { c.raw.RemoveOption(urlKey) } else { - c.raw.SetOption(urlKey, c.URLs...) + urls := c.URLs + if c.insteadOfRulesApplied { + urls = c.originalURLs + } + + c.raw.SetOption(urlKey, urls...) 
} if len(c.Fetch) == 0 { @@ -562,3 +640,20 @@ func (c *RemoteConfig) marshal() *format.Subsection { func (c *RemoteConfig) IsFirstURLLocal() bool { return url.IsLocalEndpoint(c.URLs[0]) } + +func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) { + // save original urls + originalURLs := make([]string, len(c.URLs)) + copy(originalURLs, c.URLs) + + for i, url := range c.URLs { + if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil { + c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i]) + c.insteadOfRulesApplied = true + } + } + + if c.insteadOfRulesApplied { + c.originalURLs = originalURLs + } +} diff --git a/vendor/github.com/go-git/go-git/v5/config/url.go b/vendor/github.com/go-git/go-git/v5/config/url.go new file mode 100644 index 00000000000..114d6b2662a --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/url.go @@ -0,0 +1,81 @@ +package config + +import ( + "errors" + "strings" + + format "github.com/go-git/go-git/v5/plumbing/format/config" +) + +var ( + errURLEmptyInsteadOf = errors.New("url config: empty insteadOf") +) + +// Url defines Url rewrite rules +type URL struct { + // Name new base url + Name string + // Any URL that starts with this value will be rewritten to start, instead, with . + // When more than one insteadOf strings match a given URL, the longest match is used. + InsteadOf string + + // raw representation of the subsection, filled by marshal or unmarshal are + // called. + raw *format.Subsection +} + +// Validate validates fields of branch +func (b *URL) Validate() error { + if b.InsteadOf == "" { + return errURLEmptyInsteadOf + } + + return nil +} + +const ( + insteadOfKey = "insteadOf" +) + +func (u *URL) unmarshal(s *format.Subsection) error { + u.raw = s + + u.Name = s.Name + u.InsteadOf = u.raw.Option(insteadOfKey) + return nil +} + +func (u *URL) marshal() *format.Subsection { + if u.raw == nil { + u.raw = &format.Subsection{} + } + + u.raw.Name = u.Name + u.raw.SetOption(insteadOfKey, u.InsteadOf) + + return u.raw +} + +func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL { + var longestMatch *URL + for _, u := range urls { + if !strings.HasPrefix(remoteURL, u.InsteadOf) { + continue + } + + // according to spec if there is more than one match, take the logest + if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) { + longestMatch = u + } + } + + return longestMatch +} + +func (u *URL) ApplyInsteadOf(url string) string { + if !strings.HasPrefix(url, u.InsteadOf) { + return url + } + + return u.Name + url[len(u.InsteadOf):] +} diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go index 2f9363150c8..70687965bbd 100644 --- a/vendor/github.com/go-git/go-git/v5/options.go +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -7,12 +7,12 @@ import ( "strings" "time" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" "github.com/go-git/go-git/v5/plumbing/transport" - "golang.org/x/crypto/openpgp" ) // SubmoduleRescursivity defines how depth will affect any submodule recursive @@ -60,6 +60,10 @@ type CloneOptions struct { // Tags describe how the tags will be fetched from the remote repository, // by default is AllTags. 
Tags TagMode + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte } // Validate validates the fields and sets the default values. @@ -105,6 +109,10 @@ type PullOptions struct { // Force allows the pull to update a local branch even when the remote // branch does not descend from it. Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte } // Validate validates the fields and sets the default values. @@ -155,6 +163,10 @@ type FetchOptions struct { // Force allows the fetch to update a local branch even when the remote // branch does not descend from it. Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte } // Validate validates the fields and sets the default values. @@ -194,6 +206,13 @@ type PushOptions struct { // Force allows the push to update a remote branch even when the local // branch does not descend from it. Force bool + // InsecureSkipTLS skips ssl verify if protocal is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // RequireRemoteRefs only allows a remote ref to be updated if its current + // value is the one specified here. + RequireRemoteRefs []config.RefSpec } // Validate validates the fields and sets the default values. @@ -374,7 +393,7 @@ var ( ErrMissingAuthor = errors.New("author field is required") ) -// AddOptions describes how a add operation should be performed +// AddOptions describes how an `add` operation should be performed type AddOptions struct { // All equivalent to `git add -A`, update the index not only where the // working tree has a file matching `Path` but also where the index already @@ -382,7 +401,7 @@ type AddOptions struct { // working tree. If no `Path` nor `Glob` is given when `All` option is // used, all files in the entire working tree are updated. All bool - // Path is the exact filepath to a the file or directory to be added. + // Path is the exact filepath to the file or directory to be added. Path string // Glob adds all paths, matching pattern, to the index. If pattern matches a // directory path, all directory contents are added to the index recursively. @@ -552,6 +571,10 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error { type ListOptions struct { // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod + // InsecureSkipTLS skips ssl verify if protocal is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte } // CleanOptions describes how a clean should be performed. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go index 413984aa54d..fa605b1985e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go @@ -38,6 +38,10 @@ type UnifiedEncoder struct { // a change. contextLines int + // srcPrefix and dstPrefix are prepended to file paths when encoding a diff. + srcPrefix string + dstPrefix string + // colorConfig is the color configuration. The default is no color. 
color ColorConfig } @@ -46,6 +50,8 @@ type UnifiedEncoder struct { func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder { return &UnifiedEncoder{ Writer: w, + srcPrefix: "a/", + dstPrefix: "b/", contextLines: contextLines, } } @@ -56,6 +62,18 @@ func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder { return e } +// SetSrcPrefix sets e's srcPrefix and returns e. +func (e *UnifiedEncoder) SetSrcPrefix(prefix string) *UnifiedEncoder { + e.srcPrefix = prefix + return e +} + +// SetDstPrefix sets e's dstPrefix and returns e. +func (e *UnifiedEncoder) SetDstPrefix(prefix string) *UnifiedEncoder { + e.dstPrefix = prefix + return e +} + // Encode encodes patch. func (e *UnifiedEncoder) Encode(patch Patch) error { sb := &strings.Builder{} @@ -91,7 +109,8 @@ func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch Fil case from != nil && to != nil: hashEquals := from.Hash() == to.Hash() lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", from.Path(), to.Path()), + fmt.Sprintf("diff --git %s%s %s%s", + e.srcPrefix, from.Path(), e.dstPrefix, to.Path()), ) if from.Mode() != to.Mode() { lines = append(lines, @@ -115,22 +134,22 @@ func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch Fil ) } if !hashEquals { - lines = e.appendPathLines(lines, "a/"+from.Path(), "b/"+to.Path(), isBinary) + lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), e.dstPrefix+to.Path(), isBinary) } case from == nil: lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", to.Path(), to.Path()), + fmt.Sprintf("diff --git %s %s", e.srcPrefix+to.Path(), e.dstPrefix+to.Path()), fmt.Sprintf("new file mode %o", to.Mode()), fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()), ) - lines = e.appendPathLines(lines, "/dev/null", "b/"+to.Path(), isBinary) + lines = e.appendPathLines(lines, "/dev/null", e.dstPrefix+to.Path(), isBinary) case to == nil: lines = append(lines, - fmt.Sprintf("diff --git a/%s b/%s", from.Path(), from.Path()), + fmt.Sprintf("diff --git %s %s", e.srcPrefix+from.Path(), e.dstPrefix+from.Path()), fmt.Sprintf("deleted file mode %o", from.Mode()), fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash), ) - lines = e.appendPathLines(lines, "a/"+from.Path(), "/dev/null", isBinary) + lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), "/dev/null", isBinary) } sb.WriteString(e.color[Meta]) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go index 4a26325f9e8..7cea50cd8fb 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go @@ -5,7 +5,6 @@ import ( "bytes" "io/ioutil" "os" - "os/user" "strings" "github.com/go-git/go-billy/v5" @@ -116,16 +115,16 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { // // The function assumes fs is rooted at the root filesystem. func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { - usr, err := user.Current() + home, err := os.UserHomeDir() if err != nil { return } - return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) + return loadPatterns(fs, fs.Join(home, gitconfigFile)) } // LoadSystemPatterns loads gitignore patterns from from the gitignore file -// declared in a system's /etc/gitconfig file. If the ~/.gitconfig file does +// declared in a system's /etc/gitconfig file. 
If the /etc/gitconfig file does // not exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by // the core.excludesfile property does not exist, the function will return nil. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index 98664a1ebff..7a1b8e5ae0c 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -9,7 +9,7 @@ import ( "io" "strings" - "golang.org/x/crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/storer" @@ -374,7 +374,7 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { return nil, err } - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) } func indent(t string) string { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go index 9b5f438c021..56b62c191f9 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go @@ -287,8 +287,16 @@ func printStat(fileStats []FileStat) string { for _, fs := range fileStats { addn := float64(fs.Addition) deln := float64(fs.Deletion) - adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor))) - dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor))) + addc := int(math.Floor(addn/scaleFactor)) + delc := int(math.Floor(deln/scaleFactor)) + if addc < 0 { + addc = 0 + } + if delc < 0 { + delc = 0 + } + adds := strings.Repeat("+", addc) + dels := strings.Repeat("-", delc) finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go index 46416580455..216010d913b 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go @@ -8,7 +8,7 @@ import ( stdioutil "io/ioutil" "strings" - "golang.org/x/crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/storer" @@ -304,7 +304,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) { return nil, err } - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) } // TagIter provides an iterator for a set of tags. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go index a129781157b..8d6a56f532a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go @@ -230,6 +230,12 @@ const ( PushCert Capability = "push-cert" // SymRef symbolic reference support for better negotiation. SymRef Capability = "symref" + // ObjectFormat takes a hash algorithm as an argument, indicates that the + // server supports the given hash algorithms. 
+ ObjectFormat Capability = "object-format" + // Filter if present, fetch-pack may send "filter" commands to request a + // partial clone or partial fetch and request that the server omit various objects from the packfile + Filter Capability = "filter" ) const DefaultAgent = "go-git/4.x" @@ -241,10 +247,11 @@ var known = map[Capability]bool{ NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true, Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true, AllowReachableSHA1InWant: true, PushCert: true, SymRef: true, + ObjectFormat: true, Filter: true, } var requiresArgument = map[Capability]bool{ - Agent: true, PushCert: true, SymRef: true, + Agent: true, PushCert: true, SymRef: true, ObjectFormat: true, } var multipleArgument = map[Capability]bool{ diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go index 4f6d210e98a..20c3d0560e0 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go @@ -3,7 +3,10 @@ package client import ( + "crypto/tls" + "crypto/x509" "fmt" + gohttp "net/http" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/file" @@ -21,6 +24,14 @@ var Protocols = map[string]transport.Transport{ "file": file.DefaultClient, } +var insecureClient = http.NewClient(&gohttp.Client{ + Transport: &gohttp.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, +}) + // InstallProtocol adds or modifies an existing protocol. func InstallProtocol(scheme string, c transport.Transport) { if c == nil { @@ -35,6 +46,31 @@ func InstallProtocol(scheme string, c transport.Transport) { // http://, https://, ssh:// and file://. // See `InstallProtocol` to add or modify protocols. func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { + return getTransport(endpoint) +} + +func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) { + if endpoint.Protocol == "https" { + if endpoint.InsecureSkipTLS { + return insecureClient, nil + } + + if len(endpoint.CaBundle) != 0 { + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + rootCAs.AppendCertsFromPEM(endpoint.CaBundle) + return http.NewClient(&gohttp.Client{ + Transport: &gohttp.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + }, + }, + }), nil + } + } + f, ok := Protocols[endpoint.Protocol] if !ok { return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol) @@ -43,6 +79,5 @@ func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { if f == nil { return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol) } - return f, nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go index ead215557d9..a9ee2caee7e 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go @@ -58,6 +58,11 @@ type Session interface { // If the repository does not exist, returns ErrRepositoryNotFound. // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. AdvertisedReferences() (*packp.AdvRefs, error) + // AdvertisedReferencesContext retrieves the advertised references for a + // repository. 
+ // If the repository does not exist, returns ErrRepositoryNotFound. + // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. + AdvertisedReferencesContext(context.Context) (*packp.AdvRefs, error) io.Closer } @@ -107,6 +112,10 @@ type Endpoint struct { Port int // Path is the repository path. Path string + // InsecureSkipTLS skips ssl verify if protocal is https + InsecureSkipTLS bool + // CaBundle specify additional ca bundle with system cert pool + CaBundle []byte } var defaultPorts = map[string]int{ diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go index f6e23652a79..6f0a38012b5 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go @@ -6,12 +6,13 @@ import ( "errors" "io" "os" - "os/exec" "path/filepath" "strings" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/internal/common" + "github.com/go-git/go-git/v5/utils/ioutil" + "golang.org/x/sys/execabs" ) // DefaultClient is the default local client. @@ -36,7 +37,7 @@ func NewClient(uploadPackBin, receivePackBin string) transport.Transport { func prefixExecPath(cmd string) (string, error) { // Use `git --exec-path` to find the exec path. - execCmd := exec.Command("git", "--exec-path") + execCmd := execabs.Command("git", "--exec-path") stdout, err := execCmd.StdoutPipe() if err != nil { @@ -54,7 +55,7 @@ func prefixExecPath(cmd string) (string, error) { return "", err } if isPrefix { - return "", errors.New("Couldn't read exec-path line all at once") + return "", errors.New("couldn't read exec-path line all at once") } err = execCmd.Wait() @@ -66,7 +67,7 @@ func prefixExecPath(cmd string) (string, error) { cmd = filepath.Join(execPath, cmd) // Make sure it actually exists. - _, err = exec.LookPath(cmd) + _, err = execabs.LookPath(cmd) if err != nil { return "", err } @@ -83,9 +84,9 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth cmd = r.ReceivePackBin } - _, err := exec.LookPath(cmd) + _, err := execabs.LookPath(cmd) if err != nil { - if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + if e, ok := err.(*execabs.Error); ok && e.Err == execabs.ErrNotFound { cmd, err = prefixExecPath(cmd) if err != nil { return nil, err @@ -95,11 +96,11 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth } } - return &command{cmd: exec.Command(cmd, ep.Path)}, nil + return &command{cmd: execabs.Command(cmd, ep.Path)}, nil } type command struct { - cmd *exec.Cmd + cmd *execabs.Cmd stderrCloser io.Closer closed bool } @@ -111,7 +112,7 @@ func (c *command) Start() error { func (c *command) StderrPipe() (io.Reader, error) { // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait. // We use an io.Pipe and close it after the command finishes. - r, w := io.Pipe() + r, w := ioutil.Pipe() c.cmd.Stderr = w c.stderrCloser = r return r, nil @@ -148,7 +149,7 @@ func (c *command) Close() error { } // When a repository does not exist, the command exits with code 128. 
- if _, ok := err.(*exec.ExitError); ok { + if _, ok := err.(*execabs.ExitError); ok { return nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index aeedc5bb552..d57c0feef59 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -3,6 +3,7 @@ package http import ( "bytes" + "context" "fmt" "net" "net/http" @@ -32,7 +33,7 @@ func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string const infoRefsPath = "/info/refs" -func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) { +func advertisedReferences(ctx context.Context, s *session, serviceName string) (ref *packp.AdvRefs, err error) { url := fmt.Sprintf( "%s%s?service=%s", s.endpoint.String(), infoRefsPath, serviceName, @@ -45,7 +46,7 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e s.ApplyAuthToRequest(req) applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName) - res, err := s.client.Do(req) + res, err := s.client.Do(req.WithContext(ctx)) if err != nil { return nil, err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go index 433dfcfda19..4d14ff21ea4 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go @@ -25,7 +25,11 @@ func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transpor } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(s.session, transport.ReceivePackServiceName) + return advertisedReferences(context.TODO(), s.session, transport.ReceivePackServiceName) +} + +func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { + return advertisedReferences(ctx, s.session, transport.ReceivePackServiceName) } func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) ( diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go index db37089402a..e735b3d7c98 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go @@ -25,7 +25,11 @@ func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(s.session, transport.UploadPackServiceName) + return advertisedReferences(context.TODO(), s.session, transport.UploadPackServiceName) +} + +func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { + return advertisedReferences(ctx, s.session, transport.UploadPackServiceName) } func (s *upSession) UploadPack( diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go index 89432e34c14..fdb148f5990 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go @@ -162,14 +162,18 @@ func (c 
*client) listenFirstError(r io.Reader) chan string { return errLine } -// AdvertisedReferences retrieves the advertised references from the server. func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { + return s.AdvertisedReferencesContext(context.TODO()) +} + +// AdvertisedReferences retrieves the advertised references from the server. +func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { if s.advRefs != nil { return s.advRefs, nil } ar := packp.NewAdvRefs() - if err := ar.Decode(s.Stdout); err != nil { + if err := ar.Decode(s.StdoutContext(ctx)); err != nil { if err := s.handleAdvRefDecodeError(err); err != nil { return nil, err } @@ -229,7 +233,7 @@ func (s *session) handleAdvRefDecodeError(err error) error { // UploadPack performs a request to the server to fetch a packfile. A reader is // returned with the packfile content. The reader must be closed after reading. func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { - if req.IsEmpty() { + if req.IsEmpty() && len(req.Shallows) == 0 { return nil, transport.ErrEmptyUploadPackRequest } @@ -237,7 +241,7 @@ func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) return nil, err } - if _, err := s.AdvertisedReferences(); err != nil { + if _, err := s.AdvertisedReferencesContext(ctx); err != nil { return nil, err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go index 727f9021507..8ab70fe7072 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go @@ -108,6 +108,10 @@ type upSession struct { } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { + return s.AdvertisedReferencesContext(context.TODO()) +} + +func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { @@ -162,7 +166,7 @@ func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest return nil, err } - pr, pw := io.Pipe() + pr, pw := ioutil.Pipe() e := packfile.NewEncoder(pw, s.storer, false) go func() { // TODO: plumb through a pack window. 
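
The `AdvertisedReferencesContext` method added to the transport `Session` interface (and to the http, internal/common and server session implementations in the hunks around this point) lets a caller bound the initial reference negotiation with a deadline. Below is a minimal sketch of how a caller might use it once this patch is vendored; constructing the session itself is omitted, and the helper name is made up for illustration:

```go
package example

import (
	"context"
	"time"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
	"github.com/go-git/go-git/v5/plumbing/transport"
)

// listRefsWithTimeout asks the server for its advertised references but gives
// up after the supplied timeout, using the context-aware method introduced by
// this patch. transport.UploadPackSession embeds Session, so the same call is
// available on receive-pack sessions as well.
func listRefsWithTimeout(s transport.UploadPackSession, timeout time.Duration) (*packp.AdvRefs, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return s.AdvertisedReferencesContext(ctx)
}
```
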
@@ -204,6 +208,10 @@ type rpSession struct { } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { + return s.AdvertisedReferencesContext(context.TODO()) +} + +func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go index b79a74e41d6..35146695462 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go @@ -1,8 +1,6 @@ package ssh import ( - "crypto/x509" - "encoding/pem" "errors" "fmt" "io/ioutil" @@ -13,7 +11,7 @@ import ( "github.com/go-git/go-git/v5/plumbing/transport" "github.com/mitchellh/go-homedir" - "github.com/xanzy/ssh-agent" + sshagent "github.com/xanzy/ssh-agent" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/knownhosts" ) @@ -121,27 +119,15 @@ type PublicKeys struct { // NewPublicKeys returns a PublicKeys from a PEM encoded private key. An // encryption password should be given if the pemBytes contains a password // encrypted PEM block otherwise password should be empty. It supports RSA -// (PKCS#1), DSA (OpenSSL), and ECDSA private keys. +// (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("invalid PEM data") - } - if x509.IsEncryptedPEMBlock(block) { - key, err := x509.DecryptPEMBlock(block, []byte(password)) - if err != nil { - return nil, err - } - - block = &pem.Block{Type: block.Type, Bytes: key} - pemBytes = pem.EncodeToMemory(block) - } - signer, err := ssh.ParsePrivateKey(pemBytes) + if _, ok := err.(*ssh.PassphraseMissingError); ok { + signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(password)) + } if err != nil { return nil, err } - return &PublicKeys{User: user, Signer: signer}, nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go index c05ded986d8..46e79134c33 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go @@ -6,6 +6,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/internal/common" @@ -90,8 +91,14 @@ func (c *command) Close() error { //XXX: If did read the full packfile, then the session might be already // closed. 
_ = c.Session.Close() + err := c.client.Close() - return c.client.Close() + //XXX: in go1.16+ we can use errors.Is(err, net.ErrClosed) + if err != nil && strings.HasSuffix(err.Error(), "use of closed network connection") { + return nil + } + + return err } // connect connects to the SSH server, unless a AuthMethod was set with diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index d88e8e6ff6d..4a06106c536 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "time" "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-git/v5/config" @@ -91,7 +92,7 @@ func (r *Remote) Push(o *PushOptions) error { // the remote was already up-to-date. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { if err := o.Validate(); err != nil { @@ -102,14 +103,14 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) } - s, err := newSendPackSession(r.c.URLs[0], o.Auth) + s, err := newSendPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle) if err != nil { return err } defer ioutil.CheckClose(s, &err) - ar, err := s.AdvertisedReferences() + ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return err } @@ -119,6 +120,10 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { return err } + if err := r.checkRequireRemoteRefs(o.RequireRemoteRefs, remoteRefs); err != nil { + return err + } + isDelete := false allDelete := true for _, rs := range o.RefSpecs { @@ -280,7 +285,7 @@ func (r *Remote) updateRemoteReferenceStorage( // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { _, err := r.fetch(ctx, o) @@ -309,14 +314,14 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen o.RefSpecs = r.c.Fetch } - s, err := newUploadPackSession(r.c.URLs[0], o.Auth) + s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) - ar, err := s.AdvertisedReferences() + ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return nil, err } @@ -345,6 +350,13 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen return nil, err } + if !req.Depth.IsZero() { + req.Shallows, err = r.s.Shallow() + if err != nil { + return nil, fmt.Errorf("existing checkout is not shallow") + } + } + req.Wants, err = getWants(r.s, refs) if len(req.Wants) > 0 { req.Haves, err = getHaves(localRefs, remoteRefs, r.s) @@ -362,6 +374,13 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen return nil, err } + if !updated { + updated, err = depthChanged(req.Shallows, r.s) + if err != nil { + return nil, fmt.Errorf("error checking depth change: %v", err) + } + } + if !updated { return remoteRefs, NoErrAlreadyUpToDate } @@ -369,8 +388,31 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen return remoteRefs, nil } -func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) { - c, ep, err := newClient(url) +func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) { + after, err := s.Shallow() + if err != nil { + return false, err + } + + if len(before) != len(after) { + return true, nil + } + + bm := make(map[plumbing.Hash]bool, len(before)) + for _, b := range before { + bm[b] = true + } + for _, a := range after { + if _, ok := bm[a]; !ok { + return true, nil + } + } + + return false, nil +} + +func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.UploadPackSession, error) { + c, ep, err := newClient(url, auth, insecure, cabundle) if err != nil { return nil, err } @@ -378,8 +420,8 @@ func newUploadPackSession(url string, auth transport.AuthMethod) (transport.Uplo return c.NewUploadPackSession(ep, auth) } -func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - c, ep, err := newClient(url) +func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.ReceivePackSession, error) { + c, ep, err := newClient(url, auth, insecure, cabundle) if err != nil { return nil, err } @@ -387,11 +429,13 @@ func newSendPackSession(url string, auth transport.AuthMethod) (transport.Receiv return c.NewReceivePackSession(ep, auth) } -func newClient(url string) (transport.Transport, *transport.Endpoint, error) { +func newClient(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.Transport, *transport.Endpoint, error) { ep, err := transport.NewEndpoint(url) if err != nil { return nil, nil, err } + ep.InsecureSkipTLS = insecure + ep.CaBundle = cabundle c, err := client.NewClient(ep) if err != nil { @@ -771,6 +815,11 @@ func doCalculateRefs( } func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { + shallow := false + if s, _ := localStorer.Shallow(); len(s) > 0 { + shallow = true + } + wants := map[plumbing.Hash]bool{} for _, ref := range refs { hash := ref.Hash() @@ 
-779,7 +828,7 @@ func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumb return nil, err } - if !exists { + if !exists || shallow { wants[hash] = true } } @@ -1024,15 +1073,32 @@ func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, e } // List the references on the remote repository. +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { + refs, err := r.list(ctx, o) + if err != nil { + return refs, err + } + return refs, nil +} + func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { - s, err := newUploadPackSession(r.c.URLs[0], o.Auth) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return r.ListContext(ctx, o) +} + +func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { + s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) - ar, err := s.AdvertisedReferences() + ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return nil, err } @@ -1101,7 +1167,7 @@ func pushHashes( allDelete bool, ) (*packp.ReportStatus, error) { - rd, wr := io.Pipe() + rd, wr := ioutil.Pipe() config, err := s.Config() if err != nil { @@ -1164,3 +1230,33 @@ outer: return r.s.SetShallow(shallows) } + +func (r *Remote) checkRequireRemoteRefs(requires []config.RefSpec, remoteRefs storer.ReferenceStorer) error { + for _, require := range requires { + if require.IsWildcard() { + return fmt.Errorf("wildcards not supported in RequireRemoteRefs, got %s", require.String()) + } + + name := require.Dst("") + remote, err := remoteRefs.Reference(name) + if err != nil { + return fmt.Errorf("remote ref %s required to be %s but is absent", name.String(), require.Src()) + } + + var requireHash string + if require.IsExactSHA1() { + requireHash = require.Src() + } else { + target, err := storer.ResolveReference(remoteRefs, plumbing.ReferenceName(require.Src())) + if err != nil { + return fmt.Errorf("could not resolve ref %s in RequireRemoteRefs", require.Src()) + } + requireHash = target.Hash().String() + } + + if remote.Hash().String() != requireHash { + return fmt.Errorf("remote ref %s required to be %s but is %s", name.String(), requireHash, remote.Hash().String()) + } + } + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go index 36fc1f5bd35..d3fbf97593e 100644 --- a/vendor/github.com/go-git/go-git/v5/repository.go +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -6,7 +6,6 @@ import ( "encoding/hex" "errors" "fmt" - "io" stdioutil "io/ioutil" "os" "path" @@ -14,8 +13,10 @@ import ( "strings" "time" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/osfs" + "github.com/go-git/go-billy/v5/util" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/internal/revision" "github.com/go-git/go-git/v5/plumbing" @@ -25,12 +26,9 @@ import ( "github.com/go-git/go-git/v5/plumbing/storer" "github.com/go-git/go-git/v5/storage" "github.com/go-git/go-git/v5/storage/filesystem" + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" 
"github.com/go-git/go-git/v5/utils/ioutil" "github.com/imdario/mergo" - "golang.org/x/crypto/openpgp" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/osfs" ) // GitDirName this is a special folder where all the git stuff is. @@ -190,10 +188,6 @@ func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { // Clone a repository into the given Storer and worktree Filesystem with the // given options, if worktree is nil a bare repository is created. If the given // storer is not empty ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { return CloneContext(context.Background(), s, worktree, o) } @@ -203,7 +197,7 @@ func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repos // given storer is not empty ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func CloneContext( ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, @@ -279,17 +273,18 @@ func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, return nil, nil, err } - pathinfo, err := os.Stat(path) - if !os.IsNotExist(err) { - if !pathinfo.IsDir() && detect { - path = filepath.Dir(path) - } - } - var fs billy.Filesystem var fi os.FileInfo for { fs = osfs.New(path) + + pathinfo, err := fs.Stat("/") + if !os.IsNotExist(err) { + if !pathinfo.IsDir() && detect { + fs = osfs.New(filepath.Dir(path)) + } + } + fi, err = fs.Stat(GitDirName) if err == nil { // no error; stop @@ -398,7 +393,7 @@ func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) // ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
// // TODO(mcuadros): move isBare to CloneOptions in v5 @@ -433,7 +428,7 @@ func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { } func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { - fi, err := os.Stat(path) + fi, err := osfs.Default.Stat(path) if err != nil { if os.IsNotExist(err) { return true, true, nil @@ -446,44 +441,30 @@ func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err er return false, false, fmt.Errorf("path is not a directory: %s", path) } - f, err := os.Open(path) + files, err := osfs.Default.ReadDir(path) if err != nil { return false, false, err } - defer ioutil.CheckClose(f, &err) - - _, err = f.Readdirnames(1) - if err == io.EOF { + if len(files) == 0 { return true, false, nil } - if err != nil { - return false, false, err - } - return false, false, nil } func cleanUpDir(path string, all bool) error { if all { - return os.RemoveAll(path) + return util.RemoveAll(osfs.Default, path) } - f, err := os.Open(path) + files, err := osfs.Default.ReadDir(path) if err != nil { return err } - defer ioutil.CheckClose(f, &err) - - names, err := f.Readdirnames(-1) - if err != nil { - return err - } - - for _, name := range names { - if err := os.RemoveAll(filepath.Join(path, name)); err != nil { + for _, fi := range files { + if err := util.RemoveAll(osfs.Default, osfs.Default.Join(path, fi.Name())); err != nil { return err } } @@ -841,12 +822,14 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { } ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ - RefSpecs: c.Fetch, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Tags: o.Tags, - RemoteName: o.RemoteName, + RefSpecs: c.Fetch, + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Tags: o.Tags, + RemoteName: o.RemoteName, + InsecureSkipTLS: o.InsecureSkipTLS, + CABundle: o.CABundle, }, o.ReferenceName) if err != nil { return err @@ -892,11 +875,13 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { Name: branchName, Merge: branchRef, } + if o.RemoteName == "" { b.Remote = "origin" } else { b.Remote = o.RemoteName } + if err := r.CreateBranch(b); err != nil { return err } @@ -1099,7 +1084,7 @@ func (r *Repository) Fetch(o *FetchOptions) error { // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { if err := o.Validate(); err != nil { @@ -1126,7 +1111,7 @@ func (r *Repository) Push(o *PushOptions) error { // FetchOptions.RemoteName. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { if err := o.Validate(); err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/submodule.go b/vendor/github.com/go-git/go-git/v5/submodule.go index dff26b0d80b..a202a9b6007 100644 --- a/vendor/github.com/go-git/go-git/v5/submodule.go +++ b/vendor/github.com/go-git/go-git/v5/submodule.go @@ -5,6 +5,8 @@ import ( "context" "errors" "fmt" + "net/url" + "path" "github.com/go-git/go-billy/v5" "github.com/go-git/go-git/v5/config" @@ -131,9 +133,29 @@ func (s *Submodule) Repository() (*Repository, error) { return nil, err } + moduleURL, err := url.Parse(s.c.URL) + if err != nil { + return nil, err + } + + if !path.IsAbs(moduleURL.Path) { + remotes, err := s.w.r.Remotes() + if err != nil { + return nil, err + } + + rootURL, err := url.Parse(remotes[0].c.URLs[0]) + if err != nil { + return nil, err + } + + rootURL.Path = path.Join(rootURL.Path, moduleURL.Path) + *moduleURL = *rootURL + } + _, err = r.CreateRemote(&config.RemoteConfig{ Name: DefaultRemoteName, - URLs: []string{s.c.URL}, + URLs: []string{moduleURL.String()}, }) return r, err @@ -151,7 +173,7 @@ func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { // setting in the options SubmoduleUpdateOptions.Init equals true. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { return s.update(ctx, o, plumbing.ZeroHash) @@ -232,6 +254,24 @@ func (s *Submodule) fetchAndCheckout( return err } + // Handle a case when submodule refers to an orphaned commit that's still reachable + // through Git server using a special protocol capability[1]. + // + // [1]: https://git-scm.com/docs/protocol-capabilities#_allow_reachable_sha1_in_want + if !o.NoFetch { + if _, err := w.r.Object(plumbing.AnyObject, hash); err != nil { + refSpec := config.RefSpec("+" + hash.String() + ":" + hash.String()) + + err := r.FetchContext(ctx, &FetchOptions{ + Auth: o.Auth, + RefSpecs: []config.RefSpec{refSpec}, + }) + if err != nil && err != NoErrAlreadyUpToDate && err != ErrExactSHA1NotSupported { + return err + } + } + } + if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil { return err } @@ -262,7 +302,7 @@ func (s Submodules) Update(o *SubmoduleUpdateOptions) error { // UpdateContext updates all the submodules in this list. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. 
func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { for _, sub := range s { diff --git a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go index e9dcbfe49bf..b52e85a380b 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go +++ b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go @@ -7,7 +7,7 @@ import ( "errors" "io" - "github.com/jbenet/go-context/io" + ctxio "github.com/jbenet/go-context/io" ) type readPeeker interface { @@ -168,3 +168,13 @@ func (r *writerOnError) Write(p []byte) (n int, err error) { return } + +type PipeReader interface { + io.ReadCloser + CloseWithError(err error) error +} + +type PipeWriter interface { + io.WriteCloser + CloseWithError(err error) error +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go b/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go new file mode 100644 index 00000000000..f30c452fa4e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go @@ -0,0 +1,9 @@ +// +build !js + +package ioutil + +import "io" + +func Pipe() (PipeReader, PipeWriter) { + return io.Pipe() +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go b/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go new file mode 100644 index 00000000000..cf102e6ef83 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go @@ -0,0 +1,9 @@ +// +build js + +package ioutil + +import "github.com/acomagu/bufpipe" + +func Pipe() (PipeReader, PipeWriter) { + return bufpipe.New(nil) +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index 62ad03b95c8..f23d9f1703c 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -59,7 +59,7 @@ func (w *Worktree) Pull(o *PullOptions) error { // Pull only supports merges where the can be resolved as a fast-forward. // // The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the +// operation is complete, an error is returned. The context only affects the // transport operations. func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { if err := o.Validate(); err != nil { @@ -72,11 +72,13 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { } fetchHead, err := remote.fetch(ctx, &FetchOptions{ - RemoteName: o.RemoteName, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Force: o.Force, + RemoteName: o.RemoteName, + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Force: o.Force, + InsecureSkipTLS: o.InsecureSkipTLS, + CABundle: o.CABundle, }) updated := true @@ -716,7 +718,11 @@ func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { } m := config.NewModules() - return m, m.Unmarshal(input) + if err := m.Unmarshal(input); err != nil { + return m, err + } + + return m, nil } // Clean the worktree by removing untracked files. 
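
`PullOptions`, `FetchOptions` and `CloneOptions` now carry `InsecureSkipTLS` and `CABundle`, which the hunks above thread down to the HTTPS transport (where the bundle is appended to the system certificate pool). A minimal sketch of cloning from a server signed by a private CA, assuming this patched go-git is what ends up vendored; the repository URL and the CA path are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Placeholder path: a PEM bundle for the internal CA that signed the
	// Git server's certificate.
	caPEM, err := ioutil.ReadFile("/etc/ssl/private-ca.pem")
	if err != nil {
		log.Fatal(err)
	}

	// CABundle is added to the root pool used by the HTTPS transport;
	// setting InsecureSkipTLS instead would disable verification entirely.
	_, err = git.PlainClone("/tmp/repo", false, &git.CloneOptions{
		URL:      "https://git.internal.example/org/repo.git", // placeholder
		CABundle: caPEM,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
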
@@ -765,7 +771,7 @@ func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files } } - if opts.Dir { + if opts.Dir && dir != "" { return doCleanDirectories(w.Filesystem, dir) } return nil diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go index a9d0e0409c1..dc7956909ee 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_commit.go @@ -12,8 +12,8 @@ import ( "github.com/go-git/go-git/v5/plumbing/object" "github.com/go-git/go-git/v5/storage" + "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-billy/v5" - "golang.org/x/crypto/openpgp" ) // Commit stores the current contents of the index in a new commit along with @@ -230,5 +230,9 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr return plumbing.ZeroHash, err } + hash := o.Hash() + if h.s.HasEncodedObject(hash) == nil { + return hash, nil + } return h.s.SetEncodedObject(o) } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_js.go b/vendor/github.com/go-git/go-git/v5/worktree_js.go new file mode 100644 index 00000000000..7267d055e7a --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_js.go @@ -0,0 +1,26 @@ +// +build js + +package git + +import ( + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Ctime), int64(os.CtimeNsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} + +func isSymlinkWindowsNonAdmin(err error) bool { + return false +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 4727d27f9ea..b23ab9679a8 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -126,6 +126,14 @@ type Options struct { // called for key-value pairs passed directly to Info and Error. See // RenderBuiltinsHook for more details. RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int } // MessageClass indicates which category or categories of messages to consider. @@ -193,11 +201,16 @@ func NewFormatterJSON(opts Options) Formatter { return newFormatter(opts, outputJSON) } -const defaultTimestampFmt = "2006-01-02 15:04:05.000000" +// Defaults for Options. 
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.TimestampFormat == "" { - opts.TimestampFormat = defaultTimestampFmt + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth } f := Formatter{ outputFormat: outfmt, @@ -321,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b } func (f Formatter) pretty(value interface{}) string { - return f.prettyWithFlags(value, 0) + return f.prettyWithFlags(value, 0, 0) } const ( @@ -329,7 +342,11 @@ const ( ) // TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `""` + } + // Handle types that take full control of logging. if v, ok := value.(logr.Marshaler); ok { // Replace the value with what the type wants to get logged. @@ -394,7 +411,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { // arbitrary keys might need escaping buf.WriteString(prettyString(v[i].(string))) buf.WriteByte(':') - buf.WriteString(f.pretty(v[i+1])) + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { buf.WriteByte('}') @@ -464,7 +481,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { buf.WriteByte(',') } if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct)) + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) continue } if name == "" { @@ -475,7 +492,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { buf.WriteString(name) buf.WriteByte('"') buf.WriteByte(':') - buf.WriteString(f.pretty(v.Field(i).Interface())) + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { buf.WriteByte('}') @@ -488,7 +505,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { buf.WriteByte(',') } e := v.Index(i) - buf.WriteString(f.pretty(e.Interface())) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) } buf.WriteByte(']') return buf.String() @@ -513,7 +530,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { keystr = prettyString(keystr) } else { // prettyWithFlags will produce already-escaped values - keystr = f.prettyWithFlags(it.Key().Interface(), 0) + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) if t.Key().Kind() != reflect.String { // JSON only does string keys. Unlike Go's standard JSON, we'll // convert just about anything to a string. 
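
For the funcr change above, `MaxLogDepth` caps how many levels of nested structs, maps, slices and arrays are expanded before a value is replaced with a truncation marker; the default is 16 when the field is left at zero. A small sketch of setting the option, assuming the standard `funcr.New` constructor and throwaway struct types defined only for this example:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

type inner struct{ Leaf string }
type outer struct{ Nested inner }

func main() {
	// With MaxLogDepth set to 1, the fields of the nested struct sit below
	// the allowed depth and are rendered as a truncation marker instead of
	// being expanded.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{MaxLogDepth: 1})

	log.Info("nested value", "obj", outer{Nested: inner{Leaf: "x"}})
}
```
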
@@ -522,7 +539,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { } buf.WriteString(keystr) buf.WriteByte(':') - buf.WriteString(f.pretty(it.Value().Interface())) + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } buf.WriteByte('}') @@ -531,7 +548,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32) string { if v.IsNil() { return "null" } - return f.pretty(v.Elem().Interface()) + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) } return fmt.Sprintf(`""`, t.Kind().String()) } diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8b5..2a5446762c5 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -36,7 +36,6 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) @@ -319,7 +318,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +341,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +368,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). - if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index 5ff0b4218c6..ae851fe53f2 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 21eb54858e0..e2c0f74e839 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !purego // +build !purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5ee..36062a604ca 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcacae..a3b97a1ad57 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf8a..00000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f052934..00000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb47..7b498bb2cb9 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. 
if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 44f4a5afddc..1a71bfcbd39 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index a605953d466..16e6860af6e 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index f01eff318c5..c7100346323 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -178,7 +178,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f98..76c04fdbd6a 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -207,9 +207,10 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, // Check whether this is a []byte of text data. if t.Elem() == reflect.TypeOf(byte(0)) { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) + skipType = true return opts.WithTypeMode(emitType).FormatType(t, out) } } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85ba8..68b5c1ae164 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. 
- const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -563,10 +563,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go index 8d306bf5134..fe28d15b6f9 100644 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -32,6 +32,7 @@ func DefaultPooledTransport() *http.Transport { IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, + ForceAttemptHTTP2: true, MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, } return transport diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go index 412ff549739..6dc9600c860 100644 --- a/vendor/github.com/jinzhu/copier/copier.go +++ b/vendor/github.com/jinzhu/copier/copier.go @@ -24,6 +24,13 @@ const ( // Denotes that the value as been copied hasCopied + + // Some default converter types for a nicer syntax + String string = "" + Bool bool = false + Int int = 0 + Float32 float32 = 0 + Float64 float64 = 0 ) // Option sets copy options @@ -32,6 +39,18 @@ type Option struct { // struct having all it's fields set to their zero values respectively (see IsZero() in reflect/value.go) IgnoreEmpty bool DeepCopy bool + Converters []TypeConverter +} + +type TypeConverter struct { + SrcType interface{} + DstType interface{} + Fn func(src interface{}) (interface{}, error) +} + +type converterPair struct { + SrcType reflect.Type + DstType reflect.Type } // Tag Flags @@ -59,12 +78,27 @@ func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) { var ( - isSlice bool - amount = 1 - from = indirect(reflect.ValueOf(fromValue)) - to = indirect(reflect.ValueOf(toValue)) + isSlice bool + amount = 1 + from = indirect(reflect.ValueOf(fromValue)) + to = indirect(reflect.ValueOf(toValue)) + converters map[converterPair]TypeConverter ) + // save convertes into map for faster lookup + for i := range opt.Converters { + if converters == nil { + converters = make(map[converterPair]TypeConverter) + } + + pair := converterPair{ + SrcType: reflect.TypeOf(opt.Converters[i].SrcType), + DstType: reflect.TypeOf(opt.Converters[i].DstType), + } + + converters[pair] = opt.Converters[i] + } + if !to.CanAddr() { return ErrInvalidCopyDestination } @@ -113,13 +147,16 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) for _, k := range from.MapKeys() { toKey := indirect(reflect.New(toType.Key())) - if !set(toKey, k, opt.DeepCopy) { + if !set(toKey, k, opt.DeepCopy, converters) { return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key()) } - elemType, _ := indirectType(toType.Elem()) + elemType := toType.Elem() + if elemType.Kind() != 
reflect.Slice { + elemType, _ = indirectType(elemType) + } toValue := indirect(reflect.New(elemType)) - if !set(toValue, from.MapIndex(k), opt.DeepCopy) { + if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) { if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil { return err } @@ -148,7 +185,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem())) } - if !set(to.Index(i), from.Index(i), opt.DeepCopy) { + if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt) if err != nil { @@ -203,6 +240,8 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) // check source if source.IsValid() { + copyUnexportedStructFields(dest, source) + // Copy from source field to dest field or method fromTypeFields := deepFields(fromType) for _, field := range fromTypeFields { @@ -249,7 +288,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) toField := dest.FieldByName(destFieldName) if toField.IsValid() { if toField.CanSet() { - if !set(toField, fromField, opt.DeepCopy) { + if !set(toField, fromField, opt.DeepCopy, converters) { if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil { return err } @@ -291,7 +330,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() { values := fromMethod.Call([]reflect.Value{}) if len(values) >= 1 { - set(toField, values[0], opt.DeepCopy) + set(toField, values[0], opt.DeepCopy, converters) } } } @@ -303,7 +342,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest.Addr())) } else { - if !set(to.Index(i), dest.Addr(), opt.DeepCopy) { + if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt) if err != nil { @@ -315,7 +354,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest)) } else { - if !set(to.Index(i), dest, opt.DeepCopy) { + if !set(to.Index(i), dest, opt.DeepCopy, converters) { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt) if err != nil { @@ -334,6 +373,24 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) return } +func copyUnexportedStructFields(to, from reflect.Value) { + if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) { + return + } + + // create a shallow copy of 'to' to get all fields + tmp := indirect(reflect.New(to.Type())) + tmp.Set(from) + + // revert exported fields + for i := 0; i < to.NumField(); i++ { + if tmp.Field(i).CanSet() { + tmp.Field(i).Set(to.Field(i)) + } + } + to.Set(tmp) +} + func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool { if !ignoreEmpty { return false @@ -348,10 +405,15 @@ func deepFields(reflectType reflect.Type) []reflect.StructField { for i := 0; i < reflectType.NumField(); i++ { v := reflectType.Field(i) - if v.Anonymous { - fields = append(fields, deepFields(v.Type)...) 
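
The copier changes in this file add an `Option.Converters` hook: when a source field's type and the destination field's type match a registered `TypeConverter` pair, `Fn` is invoked instead of the default assignment logic. A self-contained sketch follows (the event structs are made up for illustration); `copier.String` is one of the typed zero-value constants the patch introduces for naming the destination type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jinzhu/copier"
)

type SrcEvent struct{ When time.Time }
type DstEvent struct{ When string }

func main() {
	src := SrcEvent{When: time.Date(2022, 4, 1, 0, 0, 0, 0, time.UTC)}
	var dst DstEvent

	// Converters maps a (source type, destination type) pair to a function
	// that produces the destination value; here time.Time fields are copied
	// into string fields as RFC 3339 text.
	err := copier.CopyWithOption(&dst, &src, copier.Option{
		Converters: []copier.TypeConverter{{
			SrcType: time.Time{},
			DstType: copier.String, // typed zero value standing in for "string"
			Fn: func(src interface{}) (interface{}, error) {
				t, ok := src.(time.Time)
				if !ok {
					return nil, fmt.Errorf("unexpected type %T", src)
				}
				return t.Format(time.RFC3339), nil
			},
		}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(dst.When) // 2022-04-01T00:00:00Z
}
```
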
- } else { + // PkgPath is the package path that qualifies a lower case (unexported) + // field name. It is empty for upper case (exported) field names. + // See https://golang.org/ref/spec#Uniqueness_of_identifiers + if v.PkgPath == "" { fields = append(fields, v) + if v.Anonymous { + // also consider fields of anonymous fields as fields of the root + fields = append(fields, deepFields(v.Type)...) + } } } @@ -376,8 +438,14 @@ func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) { return reflectType, isPtr } -func set(to, from reflect.Value, deepCopy bool) bool { +func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool { if from.IsValid() { + if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil { + return false + } else if ok { + return true + } + if to.Kind() == reflect.Ptr { // set `to` to nil if from is nil if from.Kind() == reflect.Ptr && from.IsNil() { @@ -411,6 +479,9 @@ func set(to, from reflect.Value, deepCopy bool) bool { toKind = reflect.TypeOf(to.Interface()).Kind() } } + if from.Kind() == reflect.Ptr && from.IsNil() { + return true + } if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice { return false } @@ -452,7 +523,7 @@ func set(to, from reflect.Value, deepCopy bool) bool { to.Set(rv) } } else if from.Kind() == reflect.Ptr { - return set(to, from.Elem(), deepCopy) + return set(to, from.Elem(), deepCopy, converters) } else { return false } @@ -461,6 +532,33 @@ func set(to, from reflect.Value, deepCopy bool) bool { return true } +// lookupAndCopyWithConverter looks up the type pair, on success the TypeConverter Fn func is called to copy src to dst field. +func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) { + pair := converterPair{ + SrcType: from.Type(), + DstType: to.Type(), + } + + if cnv, ok := converters[pair]; ok { + result, err := cnv.Fn(from.Interface()) + + if err != nil { + return false, err + } + + if result != nil { + to.Set(reflect.ValueOf(result)) + } else { + // in case we've got a nil value to copy + to.Set(reflect.Zero(to.Type())) + } + + return true, nil + } + + return false, nil +} + // parseTags Parses struct tags and returns uint8 bit flags. func parseTags(tag string) (flg uint8, name string, err error) { for _, t := range strings.Split(tag, ",") { diff --git a/vendor/github.com/kevinburke/ssh_config/.travis.yml b/vendor/github.com/kevinburke/ssh_config/.travis.yml deleted file mode 100644 index 4306f30f854..00000000000 --- a/vendor/github.com/kevinburke/ssh_config/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -go_import_path: github.com/kevinburke/ssh_config - -language: go - -go: - - 1.11.x - - 1.12.x - - master - -before_script: - - go get -u ./... - -script: - - make race-test diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt index cd3379400dc..9dc410104b4 100644 --- a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt +++ b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt @@ -3,3 +3,4 @@ Kevin Burke Mark Nevill Sergey Lukjanov Wayne Ashley Berry +santosh653 <70637961+santosh653@users.noreply.github.com> diff --git a/vendor/github.com/kevinburke/ssh_config/README.md b/vendor/github.com/kevinburke/ssh_config/README.md index 52cc1eac4d5..d1adfed7c6e 100644 --- a/vendor/github.com/kevinburke/ssh_config/README.md +++ b/vendor/github.com/kevinburke/ssh_config/README.md @@ -17,6 +17,14 @@ want to retrieve. 
port := ssh_config.Get("myhost", "Port") ``` +Certain directives can occur multiple times for a host (such as `IdentityFile`), +so you should use the `GetAll` or `GetAllStrict` directive to retrieve those +instead. + +```go +files := ssh_config.GetAll("myhost", "IdentityFile") +``` + You can also load a config file and read values from it. ```go diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go index 136f0c35c67..7c2c8b679ce 100644 --- a/vendor/github.com/kevinburke/ssh_config/config.go +++ b/vendor/github.com/kevinburke/ssh_config/config.go @@ -102,6 +102,13 @@ func findVal(c *Config, alias, key string) (string, error) { return val, nil } +func findAll(c *Config, alias, key string) ([]string, error) { + if c == nil { + return nil, nil + } + return c.GetAll(alias, key) +} + // Get finds the first value for key within a declaration that matches the // alias. Get returns the empty string if no value was found, or if IgnoreErrors // is false and we could not parse the configuration file. Use GetStrict to @@ -114,19 +121,51 @@ func Get(alias, key string) string { return DefaultUserSettings.Get(alias, key) } +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetAllStrict to disambiguate the +// latter cases. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The match for key is case insensitive. +// +// GetAll is a wrapper around DefaultUserSettings.GetAll. +func GetAll(alias, key string) []string { + return DefaultUserSettings.GetAll(alias, key) +} + // GetStrict finds the first value for key within a declaration that matches the // alias. If key has a default value and no matching configuration is found, the // default will be returned. For more information on default values and the way // patterns are matched, see the manpage for ssh_config. // -// error will be non-nil if and only if a user's configuration file or the -// system configuration file could not be parsed, and u.IgnoreErrors is false. +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. // // GetStrict is a wrapper around DefaultUserSettings.GetStrict. func GetStrict(alias, key string) (string, error) { return DefaultUserSettings.GetStrict(alias, key) } +// GetAllStrict retrieves zero or more directives for key for the given alias. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +// +// GetAllStrict is a wrapper around DefaultUserSettings.GetAllStrict. +func GetAllStrict(alias, key string) ([]string, error) { + return DefaultUserSettings.GetAllStrict(alias, key) +} + // Get finds the first value for key within a declaration that matches the // alias. Get returns the empty string if no value was found, or if IgnoreErrors // is false and we could not parse the configuration file. 
Use GetStrict to @@ -141,6 +180,17 @@ func (u *UserSettings) Get(alias, key string) string { return val } +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetStrict to disambiguate the latter +// cases. +// +// The match for key is case insensitive. +func (u *UserSettings) GetAll(alias, key string) []string { + val, _ := u.GetAllStrict(alias, key) + return val +} + // GetStrict finds the first value for key within a declaration that matches the // alias. If key has a default value and no matching configuration is found, the // default will be returned. For more information on default values and the way @@ -149,6 +199,52 @@ func (u *UserSettings) Get(alias, key string) string { // error will be non-nil if and only if a user's configuration file or the // system configuration file could not be parsed, and u.IgnoreErrors is false. func (u *UserSettings) GetStrict(alias, key string) (string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return "", u.onceErr + } + val, err := findVal(u.userConfig, alias, key) + if err != nil || val != "" { + return val, err + } + val2, err2 := findVal(u.systemConfig, alias, key) + if err2 != nil || val2 != "" { + return val2, err2 + } + return Default(key), nil +} + +// GetAllStrict retrieves zero or more directives for key for the given alias. +// If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return nil, u.onceErr + } + val, err := findAll(u.userConfig, alias, key) + if err != nil || val != nil { + return val, err + } + val2, err2 := findAll(u.systemConfig, alias, key) + if err2 != nil || val2 != nil { + return val2, err2 + } + // TODO: IdentityFile has multiple default values that we should return. + if def := Default(key); def != "" { + return []string{def}, nil + } + return []string{}, nil +} + +func (u *UserSettings) doLoadConfigs() { u.loadConfigs.Do(func() { // can't parse user file, that's ok. var filename string @@ -176,19 +272,6 @@ func (u *UserSettings) GetStrict(alias, key string) (string, error) { return } }) - //lint:ignore S1002 I prefer it this way - if u.onceErr != nil && u.IgnoreErrors == false { - return "", u.onceErr - } - val, err := findVal(u.userConfig, alias, key) - if err != nil || val != "" { - return val, err - } - val2, err2 := findVal(u.systemConfig, alias, key) - if err2 != nil || val2 != "" { - return val2, err2 - } - return Default(key), nil } func parseFile(filename string) (*Config, error) { @@ -282,6 +365,42 @@ func (c *Config) Get(alias, key string) (string, error) { return "", nil } +// GetAll returns all values in the configuration that match the alias and +// contains key, or nil if none are present. 
+func (c *Config) GetAll(alias, key string) ([]string, error) { + lowerKey := strings.ToLower(key) + all := []string(nil) + for _, host := range c.Hosts { + if !host.Matches(alias) { + continue + } + for _, node := range host.Nodes { + switch t := node.(type) { + case *Empty: + continue + case *KV: + // "keys are case insensitive" per the spec + lkey := strings.ToLower(t.Key) + if lkey == "match" { + panic("can't handle Match directives") + } + if lkey == lowerKey { + all = append(all, t.Value) + } + case *Include: + val, _ := t.GetAll(alias, key) + if len(val) > 0 { + all = append(all, val...) + } + default: + return nil, fmt.Errorf("unknown Node type %v", t) + } + } + } + + return all, nil +} + // String returns a string representation of the Config file. func (c Config) String() string { return marshal(c).String() @@ -611,6 +730,31 @@ func (inc *Include) Get(alias, key string) string { return "" } +// GetAll finds all values in the Include statement matching the alias and the +// given key. +func (inc *Include) GetAll(alias, key string) ([]string, error) { + inc.mu.Lock() + defer inc.mu.Unlock() + var vals []string + + // TODO: we search files in any order which is not correct + for i := range inc.matches { + cfg := inc.files[inc.matches[i]] + if cfg == nil { + panic("nil cfg") + } + val, err := cfg.GetAll(alias, key) + if err == nil && len(val) != 0 { + // In theory if SupportsMultiple was false for this key we could + // stop looking here. But the caller has asked us to find all + // instances of the keyword (and could use Get() if they wanted) so + // let's keep looking. + vals = append(vals, val...) + } + } + return vals, nil +} + // String prints out a string representation of this Include directive. Note // included Config files are not printed as part of this representation. func (inc *Include) String() string { diff --git a/vendor/github.com/kevinburke/ssh_config/validators.go b/vendor/github.com/kevinburke/ssh_config/validators.go index 29fab6a9d2f..5977f90960f 100644 --- a/vendor/github.com/kevinburke/ssh_config/validators.go +++ b/vendor/github.com/kevinburke/ssh_config/validators.go @@ -160,3 +160,27 @@ var defaults = map[string]string{ strings.ToLower("VisualHostKey"): "no", strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth", } + +// these identities are used for SSH protocol 2 +var defaultProtocol2Identities = []string{ + "~/.ssh/id_dsa", + "~/.ssh/id_ecdsa", + "~/.ssh/id_ed25519", + "~/.ssh/id_rsa", +} + +// these directives support multiple items that can be collected +// across multiple files +var pluralDirectives = map[string]bool{ + "CertificateFile": true, + "IdentityFile": true, + "DynamicForward": true, + "RemoteForward": true, + "SendEnv": true, + "SetEnv": true, +} + +// SupportsMultiple reports whether a directive can be specified multiple times. 
+func SupportsMultiple(key string) bool { + return pluralDirectives[strings.ToLower(key)] +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index c9014ce1da2..0af08e65e68 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,6 +3,7 @@ before: hooks: - ./gen.sh + - go install mvdan.cc/garble@latest builds: - @@ -31,6 +32,7 @@ builds: - mips64le goarm: - 7 + gobinary: garble - id: "s2d" binary: s2d @@ -57,6 +59,7 @@ builds: - mips64le goarm: - 7 + gobinary: garble - id: "s2sx" binary: s2sx @@ -84,6 +87,7 @@ builds: - mips64le goarm: - 7 + gobinary: garble archives: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 3429879eb69..0e2dc116ad2 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,49 @@ This package provides various compression algorithms. # changelog +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +
+	<summary>See Details</summary>
+Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to testing when upgrading.
+</details>
+ +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) + * Aug 30, 2021 (v1.13.5) * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) @@ -46,6 +89,9 @@ This package provides various compression algorithms. * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+	<summary>See changes to v1.12.x</summary>
+
 * May 25, 2021 (v1.12.3)
 	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
 	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -67,9 +113,10 @@ This package provides various compression algorithms.
 	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
 	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
 	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
-	<summary>See changes prior to v1.12.1</summary>
+	<summary>See changes to v1.11.x</summary>
 
 * Mar 26, 2021 (v1.11.13)
 	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -128,7 +175,7 @@ This package provides various compression algorithms.
- See changes prior to v1.11.0 + See changes to v1.10.x * July 8, 2020 (v1.10.11) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) @@ -290,11 +337,6 @@ This package provides various compression algorithms. # deflate usage -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | old import | new import | Documentation @@ -316,6 +358,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below. +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. @@ -432,6 +476,13 @@ For more information see my blog post on [Fast Linear Time Compression](http://b This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. +# Other packages + +Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): + +* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. +* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. +* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. # license diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 5283ac5a538..bffa2f33236 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,6 +6,7 @@ package flate import ( + "encoding/binary" "fmt" "io" "math" @@ -37,15 +38,17 @@ const ( maxMatchLength = 258 // The longest match for the compressor minOffsetSize = 1 // The shortest offset that makes any sense - // The maximum number of tokens we put into a single flat block, just too - // stop things from getting too large. - maxFlateBlockTokens = 1 << 14 + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 maxStoreBlockSize = 65535 hashBits = 17 // After 17 performance degrades hashSize = 1 << hashBits hashMask = (1 << hashBits) - 1 hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 24 + maxHashOffset = 1 << 28 skipNever = math.MaxInt32 @@ -70,9 +73,9 @@ var levels = []compressionLevel{ {0, 0, 0, 0, 0, 6}, // Levels 7-9 use increasingly more lazy matching // and increasingly stringent conditions for "good enough". 
- {8, 8, 24, 16, skipNever, 7}, - {10, 16, 24, 64, skipNever, 8}, - {32, 258, 258, 4096, skipNever, 9}, + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, } // advancedState contains state for the advanced levels, with bigger hash tables, etc. @@ -93,8 +96,9 @@ type advancedState struct { hashOffset int // input window: unprocessed data is window[index:windowEnd] - index int - hashMatch [maxMatchLength + minMatchLength]uint32 + index int + estBitsPerByte int + hashMatch [maxMatchLength + minMatchLength]uint32 hash uint32 ii uint16 // position of last match, intended to overflow to reset. @@ -103,6 +107,7 @@ type advancedState struct { type compressor struct { compressionLevel + h *huffmanEncoder w *huffmanBitWriter // compression algorithm @@ -170,7 +175,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { window = d.window[d.blockStart:index] } d.blockStart = index - d.w.writeBlock(tok, eof, window) + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) return d.w.err } return nil @@ -263,7 +269,7 @@ func (d *compressor) fillWindow(b []byte) { // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead @@ -279,36 +285,75 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead // If we've got a match that's good enough, only look in 1/4 the chain. tries := d.chain - length = prevLength - if length >= d.good { - tries >>= 2 - } + length = minMatchLength - 1 wEnd := win[pos+length] wPos := win[pos:] minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + cGain := 0 + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 6 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:i+minMatchLook], wPos) - - if n > length && (n > minMatchLength || pos-i <= 4096) { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break + if n > length { + // Calculate gain. 
Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n])) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] } - wEnd = win[pos+n] } } - if i == minIndex { + if i <= minIndex { // hashPrev[i & windowMask] has already been overwritten, so stop now. break } i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex || i < 0 { + if i < minIndex { break } } @@ -327,8 +372,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - b = b[:4] - return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits) + return hash4u(binary.LittleEndian.Uint32(b), hashBits) } // bulkHash4 will compute hashes using the same @@ -337,11 +381,12 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + hb := binary.LittleEndian.Uint32(b) + dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 for i := 1; i < end; i++ { - hb = (hb << 8) | uint32(b[i+3]) + hb = (hb >> 8) | uint32(b[i+3])<<24 dst[i] = hash4u(hb, hashBits) } } @@ -374,10 +419,21 @@ func (d *compressor) deflateLazy() { if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { return } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) if s.index < s.maxInsertIndex { - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) + s.hash = hash4(d.window[s.index:]) } for { @@ -410,7 +466,7 @@ func (d *compressor) deflateLazy() { } if s.index < s.maxInsertIndex { // Update the hash - s.hash = hash4(d.window[s.index : s.index+minMatchLength]) + s.hash = hash4(d.window[s.index:]) ch := s.hashHead[s.hash&hashMask] s.chainHead = int(ch) s.hashPrev[s.index&windowMask] = ch @@ -426,12 +482,37 @@ func (d *compressor) deflateLazy() { } if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { s.length = newLength s.offset = newOffset } } + if prevLength >= minMatchLength && s.length <= prevLength { + // Check for better match at end... + // + // checkOff must be >=2 since we otherwise risk checking s.index + // Offset of 2 seems to yield best results. + const checkOff = 2 + prevIndex := s.index - 1 + if prevIndex+prevLength+checkOff < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength { + end = maxMatchLength + } + end += prevIndex + idx := prevIndex + prevLength - (4 - checkOff) + h := hash4(d.window[idx:]) + ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff) + if ch2 > minIndex { + length := matchLen(d.window[prevIndex:end], d.window[ch2:]) + // It seems like a pure length metric is best. 
+ if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + } + } + } // There was a match at the previous step, and the current match is // not better. Output the previous match. d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) @@ -479,6 +560,7 @@ func (d *compressor) deflateLazy() { } d.tokens.Reset() } + s.ii = 0 } else { // Reset, if we got a match this run. if s.length >= minMatchLength { @@ -498,13 +580,12 @@ func (d *compressor) deflateLazy() { // If we have a long run of no matches, skip additional bytes // Resets when s.ii overflows after 64KB. - if s.ii > 31 { - n := int(s.ii >> 5) + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) for j := 0; j < n; j++ { if s.index >= d.windowEnd-1 { break } - d.tokens.AddLiteral(d.window[s.index-1]) if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { @@ -512,6 +593,14 @@ func (d *compressor) deflateLazy() { } d.tokens.Reset() } + // Index... + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } s.index++ } // Flush last byte @@ -611,7 +700,9 @@ func (d *compressor) write(b []byte) (n int, err error) { } n = len(b) for len(b) > 0 { - d.step(d) + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } b = b[d.fill(d, b):] if d.err != nil { return 0, d.err @@ -652,13 +743,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) { level = 5 fallthrough case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 8 + d.w.logNewTablePenalty = 7 d.fast = newFastEnc(level) d.window = make([]byte, maxStoreBlockSize) d.fill = (*compressor).fillBlock d.step = (*compressor).storeFast case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 10 + d.w.logNewTablePenalty = 8 d.state = &advancedState{} d.compressionLevel = levels[level] d.initDeflate() diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index a746eb73387..d55ea2a7759 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDecode { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } @@ -213,11 +213,9 @@ func (e *fastGen) Reset() { // matchLen returns the maximum length. // 'a' must be the shortest of the two. 
func matchLen(a, b []byte) int { - b = b[:len(a)] var checked int for len(a) >= 8 { - b = b[:len(a)] if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { return checked + (bits.TrailingZeros64(diff) >> 3) } diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 3ad5e980724..25f6d1108fc 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "fmt" "io" + "math" ) const ( @@ -24,6 +25,10 @@ const ( codegenCodeCount = 19 badCode = 255 + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + // bufferFlushSize indicates the buffer size // after which bytes are flushed to the writer. // Should preferably be a multiple of 6, since @@ -36,8 +41,11 @@ const ( bufferSize = bufferFlushSize + 8 ) +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + // The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]int8{ +var lengthExtraBits = [32]uint8{ /* 257 */ 0, 0, 0, /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, @@ -51,19 +59,22 @@ var lengthBase = [32]uint8{ 64, 80, 96, 112, 128, 160, 192, 224, 255, } +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + // offset code word extra bits. -var offsetExtraBits = [64]int8{ +var offsetExtraBits = [32]int8{ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, /* extended window */ - 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, + 14, 14, } var offsetCombined = [32]uint32{} func init() { - var offsetBase = [64]uint32{ + var offsetBase = [32]uint32{ /* normal deflate */ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, @@ -73,17 +84,15 @@ func init() { 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, /* extended window */ - 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, - 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, - 0x100000, 0x180000, 0x200000, 0x300000, + 0x008000, 0x00c000, } for i := range offsetCombined[:] { // Don't use extended window values... - if offsetBase[i] > 0x006000 { + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { continue } - offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i]) + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) } } @@ -99,7 +108,7 @@ type huffmanBitWriter struct { // Data waiting to be written is bytes[0:nbytes] // and then the low nbits of bits. 
bits uint64 - nbits uint16 + nbits uint8 nbytes uint8 lastHuffMan bool literalEncoding *huffmanEncoder @@ -155,37 +164,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) { w.lastHuffMan = false } -func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) { - offsets, lits = true, true +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { a := t.offHist[:offsetCodeCount] - b := w.offsetFreq[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - offsets = false - break + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].len == 0 { + return false } } a = t.extraHist[:literalCount-256] - b = w.literalFreq[256:literalCount] + b = w.literalEncoding.codes[256:literalCount] b = b[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - lits = false - break + for i, v := range a { + if v != 0 && b[i].len == 0 { + return false } } - if lits { - a = t.litHist[:] - b = w.literalFreq[:len(a)] - for i := range a { - if b[i] == 0 && a[i] != 0 { - lits = false - break - } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].len == 0 { + return false } } - return + return true } func (w *huffmanBitWriter) flush() { @@ -221,8 +226,8 @@ func (w *huffmanBitWriter) write(b []byte) { _, w.err = w.writer.Write(b) } -func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { - w.bits |= uint64(b) << w.nbits +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) w.nbits += nb if w.nbits >= 48 { w.writeOutBits() @@ -423,7 +428,7 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { func (w *huffmanBitWriter) writeCode(c hcode) { // The function does not get inlined if we "& 63" the shift. - w.bits |= uint64(c.code) << w.nbits + w.bits |= uint64(c.code) << (w.nbits & 63) w.nbits += c.len if w.nbits >= 48 { w.writeOutBits() @@ -566,7 +571,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { w.lastHeader = 0 } numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate(tokens) + w.generate() var extraBits int storedSize, storable := w.storedSize(input) if storable { @@ -577,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { // Fixed Huffman baseline. var literalEncoding = fixedLiteralEncoding var offsetEncoding = fixedOffsetEncoding - var size = w.fixedSize(extraBits) + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } // Dynamic Huffman? var numCodegens int @@ -595,7 +603,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { } // Stored bytes? - if storable && storedSize < size { + if storable && storedSize <= size { w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -634,22 +642,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 w.lastHuffMan = false } - if !sync { - tokens.Fill() + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... 
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 } + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } var size int + // Check if we should reuse. if w.lastHeader > 0 { // Estimate size for using a new table. // Use the previous header size as the best estimate. newSize := w.lastHeader + tokens.EstimatedBits() - newSize += newSize >> w.logNewTablePenalty + newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty // The estimated size is calculated as an optimal table. // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize() + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits // Check if a new table is better. if newSize < reuseSize { @@ -660,35 +685,83 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b } else { size = reuseSize } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } // Check if we get a reasonable size decrease. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + if storable && ssize <= size { w.writeStoredHeader(len(input), eof) w.writeBytes(input) - w.lastHeader = 0 return } } // We want a new block/table if w.lastHeader == 0 { - w.generate(tokens) + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() // Generate codegen and codegenFrequencies, which indicates how to encode // the literalEncoding and the offsetEncoding. w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) w.codegenEncoding.generate(w.codegenFreq[:], 7) + var numCodegens int - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize()) - // Store bytes, if we don't get a reasonable improvement. - if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. w.writeStoredHeader(len(input), eof) w.writeBytes(input) - w.lastHeader = 0 return } // Write Huffman table. 
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHeader, _ = w.headerSize() + if !sync { + w.lastHeader, _ = w.headerSize() + } w.lastHuffMan = false } @@ -699,6 +772,19 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) } +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + // indexTokens indexes a slice of tokens, and updates // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. @@ -733,7 +819,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num return } -func (w *huffmanBitWriter) generate(t *tokens) { +func (w *huffmanBitWriter) generate() { w.literalEncoding.generate(w.literalFreq[:literalCount], 15) w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) } @@ -765,10 +851,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits, nbits, nbytes := w.bits, w.nbits, w.nbytes for _, t := range tokens { - if t < matchType { + if t < 256 { //w.writeCode(lits[t.literal()]) - c := lits[t.literal()] - bits |= uint64(c.code) << nbits + c := lits[t] + bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -790,13 +876,13 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) // Write the length length := t.length() - lengthCode := lengthCode(length) + lengthCode := lengthCode(length) & 31 if false { - w.writeCode(lengths[lengthCode&31]) + w.writeCode(lengths[lengthCode]) } else { // inlined - c := lengths[lengthCode&31] - bits |= uint64(c.code) << nbits + c := lengths[lengthCode] + bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -815,11 +901,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } - extraLengthBits := uint16(lengthExtraBits[lengthCode&31]) - if extraLengthBits > 0 { + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode&31]) - bits |= uint64(extraLength) << nbits + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -839,14 +925,13 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } // Write the offset offset := t.offset() - offsetCode := offset >> 16 - offset &= matchOffsetOnlyMask + offsetCode := (offset >> 16) & 31 if false { - w.writeCode(offs[offsetCode&31]) + w.writeCode(offs[offsetCode]) } else { // inlined c := offs[offsetCode] - bits |= uint64(c.code) << nbits + bits |= uint64(c.code) << (nbits & 63) nbits += c.len if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) @@ -864,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) } } } - offsetComb := offsetCombined[offsetCode] - if offsetComb > 1<<16 { + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] //w.writeBits(extraOffset, extraOffsetBits) - bits |= 
uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits - nbits += uint16(offsetComb >> 16) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -934,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // https://stackoverflow.com/a/25454430 const guessHeaderSizeBits = 70 * 8 histogram(input, w.literalFreq[:numLiterals], fill) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } w.literalFreq[endBlockMarker] = 1 w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) if fill { @@ -951,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { estBits += estBits >> w.logNewTablePenalty // Store bytes, if we don't get a reasonable improvement. - ssize, storable := w.storedSize(input) if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } w.writeStoredHeader(len(input), eof) w.writeBytes(input) return @@ -963,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { if estBits < reuseSize { if debugDeflate { - //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") } // We owe an EOB w.writeCode(w.literalEncoding.codes[endBlockMarker]) @@ -996,14 +1107,44 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { encoding := w.literalEncoding.codes[:256] // Go 1.16 LOVES having these on stack. At least 1.5x the speed. bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - for _, t := range input { - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= uint64(c.code) << nbits - nbits += c.len - if debugDeflate { - count += int(c.len) + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. + if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 } + a, b := encoding[input[0]], encoding[input[1]] + bits |= uint64(a.code) << (nbits & 63) + bits |= uint64(b.code) << ((nbits + a.len) & 63) + c := encoding[input[2]] + nbits += b.len + a.len + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + input = input[3:] + } + + // Remaining... 
+ for _, t := range input { if nbits >= 48 { binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits @@ -1015,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { nbytes = 0 return } + if debugDeflate { + count += int(nbytes) * 8 + } _, w.err = w.writer.Write(w.bytes[:nbytes]) nbytes = 0 } } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= uint64(c.code) << (nbits & 63) + nbits += c.len + if debugDeflate { + count += int(c.len) + } } // Restore... w.bits, w.nbits, w.nbytes = bits, nbits, nbytes if debugDeflate { - fmt.Println("wrote", count/8, "bytes") + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() + } + if eof || sync { w.writeCode(w.literalEncoding.codes[endBlockMarker]) w.lastHeader = 0 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index 67b2b387284..9ab497c275b 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -17,7 +17,8 @@ const ( // hcode is a huffman code with a bit code and bit length. type hcode struct { - code, len uint16 + code uint16 + len uint8 } type huffmanEncoder struct { @@ -56,7 +57,7 @@ type levelInfo struct { } // set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint16) { +func (h *hcode) set(code uint16, length uint8) { h.len = length h.code = code } @@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { var ch uint16 for ch = 0; ch < literalCount; ch++ { var bits uint16 - var size uint16 + var size uint8 switch { case ch < 144: // size 8, 000110000 .. 10111111 @@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { bits = ch + 192 - 280 size = 8 } - codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + codes[ch] = hcode{code: reverseBits(bits, size), len: size} } return h } @@ -129,9 +130,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int { func (h *huffmanEncoder) bitLengthRaw(b []byte) int { var total int for _, f := range b { - if f != 0 { - total += int(h.codes[f].len) - } + total += int(h.codes[f].len) } return total } @@ -189,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // of the level j ancestor. var leafCounts [maxBitsLimit][maxBitsLimit]int32 + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + for level := int32(1); level <= maxBits; level++ { // For every level, the first two items are the first two characters. // We initialize the levels as if we had already figured this out. levels[level] = levelInfo{ level: level, - lastFreq: int32(list[1].freq), - nextCharFreq: int32(list[2].freq), - nextPairFreq: int32(list[0].freq) + int32(list[1].freq), + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, } leafCounts[level][level] = 2 if level == 1 { @@ -207,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // We need a total of 2*n - 2 items at top level and have already generated 2. 
levels[maxBits].needed = 2*n - 4 - level := maxBits - for { + level := uint32(maxBits) + for level < 16 { l := &levels[level] if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { // We've run out of both leafs and pairs. @@ -240,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { // more values in the level below l.lastFreq = l.nextPairFreq // Take leaf counts from the lower level, except counts[level] remains the same. - copy(leafCounts[level][:level], leafCounts[level-1][:level]) + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } levels[l.level-1].needed = 2 } @@ -298,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN sortByLiteral(chunk) for _, node := range chunk { - h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)} code++ } list = list[0 : len(list)-int(bits)] @@ -311,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN // maxBits The maximum number of bits to use for any literal. func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] // Number of non-zero literals count := 0 // Set list to be the set of all non-zero literals and their frequencies @@ -319,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { list[count] = literalNode{uint16(i), f} count++ } else { - list[count] = literalNode{} - h.codes[i].len = 0 + codes[i].len = 0 } } - list[len(freq)] = literalNode{} + list[count] = literalNode{} list = list[:count] if count <= 2 { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index d1edb356c4b..414c0bea9fa 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -36,6 +36,13 @@ type lengthExtra struct { var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + // Initialize the fixedHuffmanDecoder only once upon first use. 
var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder @@ -328,11 +335,17 @@ func (f *decompressor) nextBlock() { switch typ { case 0: f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } case 1: // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("predefinied huffman block") + } case 2: // compressed, dynamic Huffman tables if f.err = f.readHuffman(); f.err != nil { @@ -341,6 +354,9 @@ func (f *decompressor) nextBlock() { f.hl = &f.h1 f.hd = &f.h2 f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("dynamic huffman block") + } default: // 3 is reserved. if debugDecode { @@ -550,221 +566,6 @@ func (f *decompressor) readHuffman() error { return nil } -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBlockGeneric() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - f.err = noEOF(err) - return - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var n uint // number of bits extra - var length int - var err error - switch { - case v < 256: - f.dict.writeByte(byte(v)) - if f.dict.availWrite() == 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric - f.stepState = stateInit - return - } - goto readLiteral - case v == 256: - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - n = 0 - case v < 269: - length = v*2 - (265*2 - 11) - n = 1 - case v < 273: - length = v*4 - (269*4 - 19) - n = 2 - case v < 277: - length = v*8 - (273*8 - 35) - n = 3 - case v < 281: - length = v*16 - (277*16 - 67) - n = 4 - case v < 285: - length = v*32 - (281*32 - 131) - n = 5 - case v < maxNumLit: - length = 258 - n = 0 - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - return - } - if n > 0 { - for f.nb < n { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n - } - - var dist uint32 - if f.hd == nil { - for f.nb < 5 { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 - } else { - sym, err := f.huffSym(f.hd) - if err != nil { - if debugDecode { - fmt.Println("huffsym:", err) - } - f.err = err - return - } - dist = uint32(sym) - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { - if err = f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - default: - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(f.dict.histSize()) { - if debugDecode { - fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. 
- { - cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = f.dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work - f.stepState = stateDict - return - } - goto readLiteral - } -} - // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index cc6db27925c..8d632cea0f5 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() { ) fr := f.r.(*bytes.Buffer) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -39,41 +44,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -88,10 +87,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -101,9 +102,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -111,25 +113,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, 
fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -137,12 +141,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -152,38 +156,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -197,9 +198,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -224,6 +228,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -248,10 +253,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() { ) fr := f.r.(*bytes.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -283,41 +295,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -332,10 +338,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -345,9 +353,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -355,25 +364,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -381,12 +392,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -396,38 +407,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
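The comment just above describes the optimization the regenerated decoders apply throughout: f.b and f.nb are copied into the locals fb and fnb so the hot loop works on register values, and the locals are written back to the struct on every exit path. A minimal, self-contained sketch of the same pattern (the decoder type and readBits helper below are illustrative, not the vendored code):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// decoder mirrors the bit-buffer fields of flate's decompressor
// (names are illustrative; this is not the vendored type).
type decoder struct {
	r  *bufio.Reader
	b  uint32 // bit buffer, LSB first
	nb uint   // number of valid bits in b
}

// readBits returns the next n bits (n < 32). The buffer and bit count are
// copied into locals (fb, fnb) so they stay in registers across the refill
// loop, and are written back to the struct before every return — the same
// shape as the regenerated huffman*Reader functions.
func (d *decoder) readBits(n uint) (uint32, error) {
	fb, fnb := d.b, d.nb
	for fnb < n {
		c, err := d.r.ReadByte()
		if err != nil {
			d.b, d.nb = fb, fnb // flush locals on every exit path
			return 0, err
		}
		fb |= uint32(c) << (fnb & 31)
		fnb += 8
	}
	v := fb & (1<<n - 1)
	d.b, d.nb = fb>>n, fnb-n
	return v, nil
}

func main() {
	d := &decoder{r: bufio.NewReader(strings.NewReader("\xAB\xCD"))}
	v, _ := d.readBits(12)
	fmt.Printf("%#x\n", v) // 0xdab: low 12 bits of the little-endian buffer
}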
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -441,9 +449,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -468,6 +479,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -492,10 +504,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() { ) fr := f.r.(*bufio.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -527,41 +546,35 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. n := uint(f.hl.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -576,10 +589,12 @@ readLiteral: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBufioReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -589,9 +604,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -599,25 +615,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -625,12 +643,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -640,38 +658,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -685,9 +700,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -712,6 +730,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -736,10 +755,12 @@ copyHistory: f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } // Decode a single Huffman block from f. @@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() { ) fr := f.r.(*strings.Reader) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + switch f.stepState { case stateInit: goto readLiteral @@ -771,41 +797,286 @@ readLiteral: // cases, the chunks slice will be 0 for the invalid sequence, leading it // satisfy the n == 0 check below. 
n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb := f.nb, f.b + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hl.chunks[b&(huffmanNumChunks-1)] + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n v = int(chunk >> huffmanValueShift) break } @@ -818,12 +1089,14 @@ readLiteral: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader + f.step = (*decompressor).huffmanGenericReader f.stepState = stateInit + f.b, f.nb = fb, fnb return } goto readLiteral case v == 256: + f.b, f.nb = fb, fnb f.finishBlock() return // otherwise, reference to older data @@ -833,9 +1106,10 @@ readLiteral: val := decCodeToLen[(v - 257)] length = int(val.length) + 3 n := uint(val.extra) - for f.nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits n>0:", err) } @@ -843,25 +1117,27 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - length += int(f.b & uint32(1<<(n®SizeMaskUint32)-1)) - f.b >>= n & regSizeMaskUint32 - f.nb -= n + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n default: if debugDecode { fmt.Println(v, ">= maxNumLit") } f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb return } var dist uint32 if f.hd == nil { - for f.nb < 5 { + for fnb < 5 { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb<5:", err) } @@ -869,12 +1145,12 @@ readLiteral: return } f.roffset++ - f.b |= uint32(c) << f.nb - f.nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) - f.b >>= 5 - f.nb -= 5 + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 } else { // Since a huffmanDecoder can be empty or be composed of a degenerate tree // with single element, huffSym must error on these two edge cases. In both @@ -884,38 +1160,35 @@ readLiteral: // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // but is smart enough to keep local variables in registers, so use nb and b, // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b for { - for nb < n { + for fnb < n { c, err := fr.ReadByte() if err != nil { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb f.err = noEOF(err) return } f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 } - chunk := f.hd.chunks[b&(huffmanNumChunks-1)] + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] n = uint(chunk & huffmanCountMask) if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask] + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] n = uint(chunk & huffmanCountMask) } - if n <= nb { + if n <= fnb { if n == 0 { - f.b = b - f.nb = nb + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("huffsym: n==0") } f.err = CorruptInputError(f.roffset) return } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n dist = uint32(chunk >> huffmanValueShift) break } @@ -929,9 +1202,10 @@ readLiteral: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. extra := (dist & 1) << (nb & regSizeMaskUint32) - for f.nb < nb { + for fnb < nb { c, err := fr.ReadByte() if err != nil { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - f.nb -= nb + extra |= fb & bitMask32[nb] + fb >>= nb & regSizeMaskUint32 + fnb -= nb dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra default: + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist too big:", dist, maxNumDist) } @@ -956,6 +1232,7 @@ readLiteral: // No check on length; encoding can be prescient. if dist > uint32(f.dict.histSize()) { + f.b, f.nb = fb, fnb if debugDecode { fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) } @@ -978,12 +1255,14 @@ copyHistory: if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.stepState = stateDict + f.b, f.nb = fb, fnb return } goto readLiteral } + // Not reached } func (f *decompressor) huffmanBlockDecoder() func() { @@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() { return f.huffmanBufioReader case *strings.Reader: return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader default: - return f.huffmanBlockGeneric + return f.huffmanGenericReader } } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 1e5eea3968a..0f14f8d63b4 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,6 +1,10 @@ package flate -import "fmt" +import ( + "encoding/binary" + "fmt" + "math/bits" +) // fastGen maintains the table for matches, // and the previous byte block for level 2. @@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. 
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { @@ -125,11 +154,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... + xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<= s { diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 234c4389ab3..8603fbd55ad 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -134,7 +134,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index c22b4244a5c..039639f8989 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -5,7 +5,7 @@ import "fmt" // fastEncL3 type fastEncL3 struct { fastGen - table [tableSize]tableEntryPrev + table [1 << 16]tableEntryPrev } // Encode uses a similar algorithm to level 2, will check up to two candidates. @@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { const ( inputMargin = 8 - 1 minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits ) if debugDeflate && e.cur < 0 { @@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry for { - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) s = nextS nextS = s + 1 + (s-nextEmit)>>skipLog if nextS > sLimit { @@ -141,7 +143,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) @@ -156,7 +166,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Index first pair after match end. 
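The level1.go hunk above replaces the call to e.matchlenLong with an inlined loop: it compares eight bytes at a time via binary.LittleEndian.Uint64 and, on the first mismatch, derives the number of matching bytes from bits.TrailingZeros64 of the XOR. A standalone sketch of that comparison (function name and inputs are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the number of leading bytes a and b have in common.
// Equal words XOR to zero; on a mismatch the index of the first differing
// byte is TrailingZeros64(diff)/8, because the loads are little-endian.
func matchLen(a, b []byte) int {
	if len(b) < len(a) {
		a = a[:len(b)]
	}
	var n int
	for len(a) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("deflate stream"), []byte("deflate strEAM"))) // 11
}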
if int(t+4) < len(src) && t > 0 { cv := load3232(src, t) - nextHash := hash(cv) + nextHash := hash4u(cv, tableBits) e.table[nextHash] = tableEntryPrev{ Prev: e.table[nextHash].Cur, Cur: tableEntry{offset: e.cur + t}, @@ -165,30 +175,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { goto emitRemainder } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-3 to s. - x := load6432(src, s-3) - prevHash := hash(uint32(x)) - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 3}, + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 5 { + nextHash := hash4u(load3232(src, i), tableBits) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} } - x >>= 8 - prevHash = hash(uint32(x)) + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 2}, } x >>= 8 - prevHash = hash(uint32(x)) + prevHash = hash4u(uint32(x), tableBits) e.table[prevHash] = tableEntryPrev{ Prev: e.table[prevHash].Cur, Cur: tableEntry{offset: e.cur + s - 1}, } x >>= 8 - currHash := hash(uint32(x)) + currHash := hash4u(uint32(x), tableBits) candidates := e.table[currHash] cv = uint32(x) e.table[currHash] = tableEntryPrev{ @@ -200,15 +211,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { candidate = candidates.Cur minOffset := e.cur + s - (maxMatchOffset - 4) - if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { - // We only check if value mismatches. - // Offset will always be invalid in other cases. + if candidate.offset > minOffset { + if cv == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } candidate = candidates.Prev if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { - offset := s - (candidate.offset - e.cur) - if offset <= maxMatchOffset { - continue - } + // Match at prev... 
+ continue } } cv = uint32(x >> 8) diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index e62f0c02b1e..1cbffa1aefe 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -135,7 +135,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 293a3a320b7..4b97576bd38 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -210,7 +210,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if debugDeflate { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index a709977ec49..62888edf3cd 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -243,7 +243,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } if false { if t >= s { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 53e89912463..544162a4318 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { l++ } if nextEmit < s { - emitLiteral(dst, src[nextEmit:s]) + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } } // Save the match found diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index eb862d7a920..d818790c132 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -13,11 +13,10 @@ import ( ) const ( - // From top - // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused - // 8 bits: xlength = length - MIN_MATCH_LENGTH - // 5 bits offsetcode - // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits lengthShift = 22 offsetMask = 1<maxnumlit offHist [32]uint16 // offset codes litHist [256]uint16 // codes 0->255 - n uint16 // Must be able to contain maxStoreBlockSize + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize tokens [maxStoreBlockSize + 1]token } @@ -142,7 +141,7 @@ func (t *tokens) Reset() { 
return } t.n = 0 - t.nLits = 0 + t.nFilled = 0 for i := range t.litHist[:] { t.litHist[i] = 0 } @@ -161,12 +160,12 @@ func (t *tokens) Fill() { for i, v := range t.litHist[:] { if v == 0 { t.litHist[i] = 1 - t.nLits++ + t.nFilled++ } } for i, v := range t.extraHist[:literalCount-256] { if v == 0 { - t.nLits++ + t.nFilled++ t.extraHist[i] = 1 } } @@ -196,20 +195,17 @@ func (t *tokens) indexTokens(in []token) { // emitLiteral writes a literal chunk and returns the number of bytes written. func emitLiteral(dst *tokens, lit []byte) { - ol := int(dst.n) - for i, v := range lit { - dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + for _, v := range lit { + dst.tokens[dst.n] = token(v) dst.litHist[v]++ + dst.n++ } - dst.n += uint16(len(lit)) - dst.nLits += len(lit) } func (t *tokens) AddLiteral(lit byte) { t.tokens[t.n] = token(lit) t.litHist[lit]++ t.n++ - t.nLits++ } // from https://stackoverflow.com/a/28730362 @@ -230,8 +226,9 @@ func (t *tokens) EstimatedBits() int { shannon := float32(0) bits := int(0) nMatches := 0 - if t.nLits > 0 { - invTotal := 1.0 / float32(t.nLits) + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) for _, v := range t.litHist[:] { if v > 0 { n := float32(v) @@ -275,10 +272,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { } oCode := offsetCode(xoffset) xoffset |= oCode << 16 - t.nLits++ t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode]++ + t.offHist[oCode&31]++ t.tokens[t.n] = token(matchType | xlength< 258 { // We need to have at least baseMatchLength left over for next loop. - xl = 258 - baseMatchLength + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } } xlength -= xl xl -= baseMatchLength - t.nLits++ t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc]++ + t.offHist[oc&31]++ t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } -// The code is never more than 8 bits, but is returned as uint32 for convenience. -func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) } +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go new file mode 100644 index 00000000000..ff2c69d60cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/autogen.go @@ -0,0 +1,5 @@ +package huff0 + +//go:generate go run generate.go +//go:generate asmfmt -w decompress_amd64.s +//go:generate asmfmt -w decompress_8b_amd64.s diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8868a..451160edda3 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
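In the token.go hunk above the offset histogram updates change from t.offHist[oc]++ to t.offHist[oc&31]++. Since offHist has exactly 32 entries, masking the index with 31 proves to the compiler that the access is always in range, so the bounds check disappears from this hot path. A small sketch of the idiom (type and method names are illustrative):

package main

import "fmt"

// offsetHistogram counts offset codes. The table has a power-of-two size,
// so indexing with `code & 31` is provably in bounds and lets the compiler
// drop the bounds check on the increment.
type offsetHistogram struct {
	offHist [32]uint16
}

func (h *offsetHistogram) add(code uint32) {
	h.offHist[code&31]++ // mask guarantees 0 <= index < 32
}

func main() {
	var h offsetHistogram
	for _, c := range []uint32{3, 3, 17, 31} {
		h.add(c)
	}
	fmt.Println(h.offHist[3], h.offHist[17], h.offHist[31]) // 2 1 1
}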
-func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. 
b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { return uint16(b.value >> ((64 - n) & 63)) } +// peekTopBits(n) is equvialent to peekBitFast(64 - n) +func (b *bitReaderShifted) peekTopBits(n uint8) uint16 { + return uint16(b.value >> n) +} + func (b *bitReaderShifted) advance(n uint8) { b.bitsRead += n b.value <<= n & 63 @@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc05389..bc95ac623bd 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 9b7cc8e97bb..04f6529955e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) @@ -20,7 +21,7 @@ type dEntrySingle struct { // double-symbols decoding type dEntryDouble struct { - seq uint16 + seq [4]byte nBits uint8 len uint8 } @@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} } // Decompress1X will decompress a 1X encoded stream. @@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { dt := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. 
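The decompress.go changes above give the Decoder a *sync.Pool of scratch buffers: d.buffer() hands out a pooled *[4][256]byte and every return path gives it back with d.bufs.Put(bufs), avoiding a fresh 1 KiB allocation per decode. A minimal sketch of that get-or-allocate pattern (names are illustrative; the vendored code calls Put explicitly on each return rather than deferring):

package main

import (
	"fmt"
	"sync"
)

// scratch is the per-decode temporary: four 256-byte staging buffers,
// one per interleaved bit stream.
type scratch = [4][256]byte

var bufPool sync.Pool // holds *scratch values

// getScratch returns a pooled buffer if one is available, otherwise a new one.
func getScratch() *scratch {
	if b, ok := bufPool.Get().(*scratch); ok {
		return b
	}
	return &scratch{}
}

func decodeBlock(dst []byte) []byte {
	buf := getScratch()
	defer bufPool.Put(buf) // returned to the pool once the block is done
	// ... fill buf[0..3] from the four bit streams, then flush to dst ...
	return append(dst, buf[0][:8]...)
}

func main() {
	out := decodeBlock(nil)
	fmt.Println(len(out)) // 8
}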
- var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 for br.off >= 8 { @@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { bitsLeft -= nBits dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src 
[]byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,199 +721,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. 
- const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. 
- val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -914,18 +758,18 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - shift := (8 - d.actualTableLog) & 7 + shift := (56 + (8 - d.actualTableLog)) & 63 const tlSize = 1 << 8 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -935,96 +779,109 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + 
buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. 
if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1032,23 +889,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1068,24 +933,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry + v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -1121,18 +993,18 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { out := dst dstEvery := (dstSize + 3) / 4 - const shift = 0 + const shift = 56 const tlSize = 1 << 8 const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -1142,96 +1014,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. 
const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry 
+ v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1241,21 +1126,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1275,24 +1166,32 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } // Read value and increment offset. 
- v := single[br.peekByteFast()>>shift].entry + v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s new file mode 100644 index 00000000000..0d6cb1a962b --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s @@ -0,0 +1,488 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: + + // const stream = 0 + // br0.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read + MOVQ bitReaderShifted_value(br0), br_value + MOVQ bitReaderShifted_off(br0), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill0 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br0), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br0.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill0: + + // val0 := br0.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br0.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br0.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br0.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 0(buffer)(off*1) + + // SECOND PART: + // val2 := 
br0.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br0.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br0.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br0.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 0+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br0) + MOVQ br_value, bitReaderShifted_value(br0) + MOVQ br_offset, bitReaderShifted_off(br0) + + // const stream = 1 + // br1.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read + MOVQ bitReaderShifted_value(br1), br_value + MOVQ bitReaderShifted_off(br1), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill1 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br1), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br1.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill1: + + // val0 := br1.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br1.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br1.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br1.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 256(buffer)(off*1) + + // SECOND PART: + // val2 := br1.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br1.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br1.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br1.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + 
// these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 256+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br1) + MOVQ br_value, bitReaderShifted_value(br1) + MOVQ br_offset, bitReaderShifted_off(br1) + + // const stream = 2 + // br2.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read + MOVQ bitReaderShifted_value(br2), br_value + MOVQ bitReaderShifted_off(br2), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill2 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br2), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br2.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill2: + + // val0 := br2.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br2.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br2.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br2.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 512(buffer)(off*1) + + // SECOND PART: + // val2 := br2.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br2.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br2.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br2.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 512+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br2) + MOVQ br_value, bitReaderShifted_value(br2) + MOVQ br_offset, bitReaderShifted_off(br2) + + // const stream = 3 + // br3.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read + MOVQ bitReaderShifted_value(br3), br_value + MOVQ bitReaderShifted_off(br3), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill3 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = 
v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br3), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br3.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill3: + + // val0 := br3.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br3.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br3.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br3.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 768(buffer)(off*1) + + // SECOND PART: + // val2 := br3.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br3.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br3.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br3.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, 768+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br3) + MOVQ br_value, bitReaderShifted_value(br3) + MOVQ br_offset, bitReaderShifted_off(br3) + + ADDQ $4, off // off += 2 + + TESTB DH, DH // any br[i].ofs < 4? 
+ JNZ end + + CMPQ off, $bufoff + JL main_loop + +end: + MOVQ 0(SP), BP + + MOVB off, ret+56(FP) + RET + +#undef off +#undef buffer +#undef table + +#undef br_bits_read +#undef br_value +#undef br_offset +#undef peek_bits +#undef exhausted + +#undef br0 +#undef br1 +#undef br2 +#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in new file mode 100644 index 00000000000..6d477a2c11e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in @@ -0,0 +1,197 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: +{{ define "decode_2_values_x86" }} + // const stream = {{ var "id" }} + // br{{ var "id"}}.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read + MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value + MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset + + // if b.bitsRead >= 32 { + CMPQ br_bits_read, $32 + JB skip_fill{{ var "id" }} + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br{{ var "id" }}), AX + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVQ br_bits_read, CX + SHLQ CL, AX + ORQ AX, br_value + + // exhausted = exhausted || (br{{ var "id"}}.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + // } +skip_fill{{ var "id" }}: + + // val0 := br{{ var "id"}}.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br{{ var "id"}}.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val1 := br{{ var "id"}}.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br{{ var "id"}}.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, {{ var "bufofs" }}(buffer)(off*1) + + // SECOND PART: + // val2 := br{{ var "id"}}.peekTopBits(peekBits) + MOVQ br_value, AX + MOVQ 
peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v2 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br{{ var "id"}}.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + // val3 := br{{ var "id"}}.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + + // v3 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br{{ var "id"}}.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVBQZX AL, CX + SHLQ CX, br_value // value <<= n + ADDQ CX, br_bits_read // bits_read += n + + + // these two writes get coalesced + // buf[stream][off+2] = uint8(v2.entry >> 8) + // buf[stream][off+3] = uint8(v3.entry >> 8) + MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) + MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) + MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) +{{ end }} + + {{ set "id" "0" }} + {{ set "ofs" "0" }} + {{ set "bufofs" "0" }} {{/* id * bufoff */}} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "1" }} + {{ set "ofs" "8" }} + {{ set "bufofs" "256" }} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "2" }} + {{ set "ofs" "16" }} + {{ set "bufofs" "512" }} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "3" }} + {{ set "ofs" "24" }} + {{ set "bufofs" "768" }} + {{ template "decode_2_values_x86" . }} + + ADDQ $4, off // off += 2 + + TESTB DH, DH // any br[i].ofs < 4? + JNZ end + + CMPQ off, $bufoff + JL main_loop +end: + MOVQ 0(SP), BP + + MOVB off, ret+56(FP) + RET +#undef off +#undef buffer +#undef table + +#undef br_bits_read +#undef br_value +#undef br_offset +#undef peek_bits +#undef exhausted + +#undef br0 +#undef br1 +#undef br2 +#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go new file mode 100644 index 00000000000..d47f6644f3f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -0,0 +1,181 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// This file contains the specialisation of Decoder.Decompress4X +// that uses an asm implementation of its main loop. +package huff0 + +import ( + "errors" + "fmt" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// go:noescape +func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, + peekBits uint8, buf *byte, tbl *dEntrySingle) uint8 + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// go:noescape +func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, + peekBits uint8, buf *byte, tbl *dEntrySingle) uint8 + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + const debug = false + + // see: bitReaderShifted.peekBitsFast() + peekBits := uint8((64 - d.actualTableLog) & 63) + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + if use8BitTables { + off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) + } else { + off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) + } + if debug { + fmt.Print("DEBUG: ") + fmt.Printf("off=%d,", off) + for i := 0; i < 4; i++ { + fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}", + i, br[i].bitsRead, br[i].value, br[i].off) + } + fmt.Println("") + } + + if off != 0 { + break + } + + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000000..2edad3ea5a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,506 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: + + // const stream = 0 + // br0.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read + MOVQ bitReaderShifted_value(br0), br_value + MOVQ bitReaderShifted_off(br0), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill0 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br0), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br0.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill0: + + // val0 := br0.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br0.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := 
br0.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br0.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 0(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br0) + MOVQ br_value, bitReaderShifted_value(br0) + MOVQ br_offset, bitReaderShifted_off(br0) + + // const stream = 1 + // br1.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read + MOVQ bitReaderShifted_value(br1), br_value + MOVQ bitReaderShifted_off(br1), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill1 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br1), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br1.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill1: + + // val0 := br1.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br1.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := br1.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br1.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 256(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br1) + MOVQ br_value, bitReaderShifted_value(br1) + MOVQ br_offset, bitReaderShifted_off(br1) + + // const stream = 2 + // br2.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read + MOVQ bitReaderShifted_value(br2), br_value + MOVQ bitReaderShifted_off(br2), br_offset + + // We must have at least 2 * max tablelog left + CMPQ 
br_bits_read, $64-22 + JBE skip_fill2 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br2), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br2.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill2: + + // val0 := br2.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br2.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := br2.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br2.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 512(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br2) + MOVQ br_value, bitReaderShifted_value(br2) + MOVQ br_offset, bitReaderShifted_off(br2) + + // const stream = 3 + // br3.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read + MOVQ bitReaderShifted_value(br3), br_value + MOVQ bitReaderShifted_off(br3), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill3 + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br3), AX + + // b.value |= uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX + +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br3.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + + // } +skip_fill3: + + // val0 := br3.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // 
br3.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + +#else + // val1 := br3.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask + +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br3.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n + +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n + +#endif + + ADDQ CX, br_bits_read // bits_read += n + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, 768(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br3) + MOVQ br_value, bitReaderShifted_value(br3) + MOVQ br_offset, bitReaderShifted_off(br3) + + ADDQ $2, off // off += 2 + + TESTB DH, DH // any br[i].ofs < 4? + JNZ end + + CMPQ off, $bufoff + JL main_loop + +end: + MOVQ 0(SP), BP + + MOVB off, ret+56(FP) + RET + +#undef off +#undef buffer +#undef table + +#undef br_bits_read +#undef br_value +#undef br_offset +#undef peek_bits +#undef exhausted + +#undef br0 +#undef br1 +#undef br2 +#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in new file mode 100644 index 00000000000..330d86ae155 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in @@ -0,0 +1,195 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + +#define bufoff 256 // see decompress.go, we're using [4][256]byte table + +//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, +// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) +TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 +#define off R8 +#define buffer DI +#define table SI + +#define br_bits_read R9 +#define br_value R10 +#define br_offset R11 +#define peek_bits R12 +#define exhausted DX + +#define br0 R13 +#define br1 R14 +#define br2 R15 +#define br3 BP + + MOVQ BP, 0(SP) + + XORQ exhausted, exhausted // exhausted = false + XORQ off, off // off = 0 + + MOVBQZX peekBits+32(FP), peek_bits + MOVQ buf+40(FP), buffer + MOVQ tbl+48(FP), table + + MOVQ pbr0+0(FP), br0 + MOVQ pbr1+8(FP), br1 + MOVQ pbr2+16(FP), br2 + MOVQ pbr3+24(FP), br3 + +main_loop: +{{ define "decode_2_values_x86" }} + // const stream = {{ var "id" }} + // br{{ var "id"}}.fillFast() + MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read + MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value + MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset + + // We must have at least 2 * max tablelog left + CMPQ br_bits_read, $64-22 + JBE skip_fill{{ var "id" }} + + SUBQ $32, br_bits_read // b.bitsRead -= 32 + SUBQ $4, br_offset // b.off -= 4 + + // v := b.in[b.off-4 : b.off] + // v = v[:4] + // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + MOVQ bitReaderShifted_in(br{{ var "id" }}), AX + + // b.value |= 
uint64(low) << (b.bitsRead & 63) +#ifdef GOAMD64_v3 + SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) +#else + MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) + MOVQ br_bits_read, CX + SHLQ CL, AX +#endif + + ORQ AX, br_value + + // exhausted = exhausted || (br{{ var "id"}}.off < 4) + CMPQ br_offset, $4 + SETLT DL + ORB DL, DH + // } +skip_fill{{ var "id" }}: + + // val0 := br{{ var "id"}}.peekTopBits(peekBits) +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask +#else + MOVQ br_value, AX + MOVQ peek_bits, CX + SHRQ CL, AX // AX = (value >> peek_bits) & mask +#endif + + // v0 := table[val0&mask] + MOVW 0(table)(AX*2), AX // AX - v0 + + // br{{ var "id"}}.advance(uint8(v0.entry)) + MOVB AH, BL // BL = uint8(v0.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n +#endif + + ADDQ CX, br_bits_read // bits_read += n + + +#ifdef GOAMD64_v3 + SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask +#else + // val1 := br{{ var "id"}}.peekTopBits(peekBits) + MOVQ peek_bits, CX + MOVQ br_value, AX + SHRQ CL, AX // AX = (value >> peek_bits) & mask +#endif + + // v1 := table[val1&mask] + MOVW 0(table)(AX*2), AX // AX - v1 + + // br{{ var "id"}}.advance(uint8(v1.entry)) + MOVB AH, BH // BH = uint8(v1.entry >> 8) + +#ifdef GOAMD64_v3 + MOVBQZX AL, CX + SHLXQ AX, br_value, br_value // value <<= n +#else + MOVBQZX AL, CX + SHLQ CL, br_value // value <<= n +#endif + + ADDQ CX, br_bits_read // bits_read += n + + + // these two writes get coalesced + // buf[stream][off] = uint8(v0.entry >> 8) + // buf[stream][off+1] = uint8(v1.entry >> 8) + MOVW BX, {{ var "bufofs" }}(buffer)(off*1) + + // update the bitrader reader structure + MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) + MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) + MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) +{{ end }} + + {{ set "id" "0" }} + {{ set "ofs" "0" }} + {{ set "bufofs" "0" }} {{/* id * bufoff */}} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "1" }} + {{ set "ofs" "8" }} + {{ set "bufofs" "256" }} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "2" }} + {{ set "ofs" "16" }} + {{ set "bufofs" "512" }} + {{ template "decode_2_values_x86" . }} + + {{ set "id" "3" }} + {{ set "ofs" "24" }} + {{ set "bufofs" "768" }} + {{ template "decode_2_values_x86" . }} + + ADDQ $2, off // off += 2 + + TESTB DH, DH // any br[i].ofs < 4? + JNZ end + + CMPQ off, $bufoff + JL main_loop +end: + MOVQ 0(SP), BP + + MOVB off, ret+56(FP) + RET +#undef off +#undef buffer +#undef table + +#undef br_bits_read +#undef br_value +#undef br_offset +#undef peek_bits +#undef exhausted + +#undef br0 +#undef br1 +#undef br2 +#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000000..126b4d68a94 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,193 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. 
+// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) + out = out[bufoff:] + decoded += bufoff * 4 + // There must at least be 3 buffers left. 
+ if len(out) < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 3ee00ecb470..e8ad17ad08e 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index c8f0f16fc1e..e3445ac194e 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. @@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. `EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. +This function can be called concurrently. +Each call will only run on a same goroutine as the caller. Encoded blocks can be concatenated and the result will be the combined input stream. Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. 
@@ -149,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip This package: file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 8825 22.90 +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 cgo zstd: silesia.tar zstd 1 211947520 73605392 543 371.56 @@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66 silesia.tar zstd 9 211947520 60212393 5063 39.92 gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80136201 1152 175.45 +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 GOB stream of binary data. Highly compressible. https://files.klauspost.com/compress/gob-stream.7z file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 165609838 50369 36.19 +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 9 1911399616 177620386 16175 112.70 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 359753026 5438 335.20 +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 The test data for the Large Text Compression Benchmark is the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. http://mattmahoney.net/dc/textdata.html file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 Highly compressible JSON file. 
https://files.klauspost.com/compress/github-june-2days-2019.json.zst file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 +github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 +github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 +github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 +github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 +github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 +github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 VM Image, Linux mint with a few installed applications: https://files.klauspost.com/compress/rawstudio-mint14.7z file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 +rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 +rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 +rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 +rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 +rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 +rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 CSV data: https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 +nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 +nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 +nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 +nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 +nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11 +nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ``` ## 
Decompressor @@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error { } ``` -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. -See "Allocation-less operation" below. +It is important to use the "Close" function when you no longer need the Reader to stop running goroutines, +when running with default settings. +Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream. + +Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput. +However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data +as it is being requested only. For decoding buffers, it could look something like this: @@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) +var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. @@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) { ``` Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. +The decoder can be used for *concurrent* decompression of multiple buffers. +By default 4 decompressors will be created. + It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders. ### Dictionaries @@ -357,19 +369,21 @@ In this case no unneeded allocations should be made. The buffer decoder does everything on the same goroutine and does nothing concurrently. It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. -The stream decoder operates on +The stream decoder will create goroutines that: -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. -* A goroutine coordinates these blocks and sends history from one to the next. +1) Reads input and splits the input into blocks. +2) Decompression of literals. +3) Decompression of sequences. +4) Reconstruction of output stream. So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. +The concurrency level will, for streams, determine how many blocks ahead the compression will start. + Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. -In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - +In practice this means that concurrency is often limited to utilizing about 3 cores effectively. + ### Benchmarks These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). 
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 85445853715..d7cd15ba29d 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -7,6 +7,7 @@ package zstd import ( "encoding/binary" "errors" + "fmt" "io" "math/bits" ) @@ -50,16 +51,23 @@ func (b *bitReader) getBits(n uint8) int { if n == 0 /*|| b.bitsRead >= 64 */ { return 0 } - return b.getBitsFast(n) + return int(b.get32BitsFast(n)) } -// getBitsFast requires that at least one bit is requested every time. +// get32BitsFast requires that at least one bit is requested every time. // There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) int { +func (b *bitReader) get32BitsFast(n uint8) uint32 { const regMask = 64 - 1 v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) b.bitsRead += n - return int(v) + return v +} + +func (b *bitReader) get16BitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v } // fillFast() will make sure at least 32 bits are available. @@ -125,6 +133,9 @@ func (b *bitReader) remain() uint { func (b *bitReader) close() error { // Release reference. b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 303ae90f944..b3661828509 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -38,7 +38,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) { b.nBits += bits } -// addBits32NC will add up to 32 bits. +// addBits32NC will add up to 31 bits. // It will not check if there is space for them, // so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits32NC(value uint32, bits uint8) { @@ -46,6 +46,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) { b.nBits += bits } +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 8a98c4562e0..7d567a54a05 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -76,17 +76,25 @@ type blockDec struct { // Window size of the block. 
WindowSize uint64 - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup + err error + + // Check against this crc + checkCRC []byte // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. localFrame *frameDec + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + // Block is RLE, this is the size. RLESize uint32 tmp [4]byte @@ -109,13 +117,8 @@ func (b *blockDec) String() string { func newBlockDec(lowMem bool) *blockDec { b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), + lowMem: lowMem, } - b.decWG.Add(1) - go b.startDecoder() return &b } @@ -138,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { case blockTypeReserved: return ErrReservedBlockType case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } b.RLESize = uint32(cSize) if b.lowMem { maxSize = cSize @@ -158,7 +167,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return ErrCompressedSizeTooBig } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = 0 // We do not need a destination for raw blocks. maxSize = -1 @@ -193,85 +214,14 @@ func (b *blockDec) sendErr(err error) { b.Last = true b.Type = blockTypeReserved b.err = err - b.input <- struct{}{} } // Close will release resources. // Closed blockDec cannot be reset. func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() } -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. -func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } -} - -// decodeAsync will prepare decoding the block when it receives the history. 
-// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -294,14 +244,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -311,30 +270,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -350,7 +297,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -361,7 +308,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -372,7 +319,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -382,7 +329,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -393,13 +340,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -407,7 +356,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { @@ -418,7 +367,6 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.literalBuf = make([]byte, litRegenSize) } else { b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } } } @@ -434,7 +382,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] @@ -442,31 +390,65 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. 
+ huff.MaxDecodedSize = maxCompressedBlockSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + case literalsBlockCompressed: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litCompSize] in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { b.literalBuf = make([]byte, 0, litRegenSize) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize) } } - if huff == nil { - huff = &huff0.Scratch{} + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = maxCompressedBlockSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -475,27 +457,56 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + err = hist.decoders.decodeSync(hist) + if err != nil { + return err + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { - case seqHeader == 0: - in = in[1:] case seqHeader < 128: nSeqs = int(seqHeader) in = in[1:] @@ -512,19 +523,16 @@ func (b *blockDec) decodeCompressed(hist *history) error { nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) in = in[3:] } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] + return ErrUnexpectedBlockSize } - var seqs = &sequenceDecs{} + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -553,6 +561,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -560,34 +571,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -597,140 +610,89 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. 
- // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } - - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err - } - if debugDecoder { - println("History merged ok") + br := seqs.br + if br == nil { + br = &bitReader{} } - br := &bitReader{} if err := br.init(in); err != nil { return err } - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. 
if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 3df185ee465..12e8f6f0b61 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -51,7 +51,7 @@ func (b *blockEnc) init() { if cap(b.literals) < maxCompressedBlockSize { b.literals = make([]byte, 0, maxCompressedBlockSize) } - const defSeqs = 200 + const defSeqs = 2000 if cap(b.sequences) < defSeqs { b.sequences = make([]seq, 0, defSeqs) } @@ -426,7 +426,7 @@ func fuzzFseEncoder(data []byte) int { return 0 } enc := fseEncoder{} - hist := enc.Histogram()[:256] + hist := enc.Histogram() maxSym := uint8(0) for i, v := range data { v = v & 63 @@ -722,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) } seq-- - if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { - // No need to flush (common) - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is 8 for all. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // We checked that all can stay within 32 bits - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - } else { - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is below 8 for each. 
- of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // ml+ll = max 32 bits total - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. + outBits := ofB.outBits & 31 + extraBits := uint64(s.offset & bitMask32[outBits]) + extraBitsN := outBits + + mlB := mlTT[s.mlCode] + //ml.encode(mlB) + nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 + dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) + wr.addBits16NC(ml.state, uint8(nbBitsOut)) + ml.state = ml.stateTable[dstState] + + outBits = mlB.outBits & 31 + extraBits = extraBits<> 16 + dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) + wr.addBits16NC(ll.state, uint8(nbBitsOut)) + ll.state = ll.stateTable[dstState] + + outBits = llB.outBits & 31 + extraBits = extraBits< math.MaxUint16 { panic("can only encode up to 64K sequences") } // No bounds checks after here: - llH := b.coders.llEnc.Histogram()[:256] - ofH := b.coders.ofEnc.Histogram()[:256] - mlH := b.coders.mlEnc.Histogram()[:256] + llH := b.coders.llEnc.Histogram() + ofH := b.coders.ofEnc.Histogram() + mlH := b.coders.mlEnc.Histogram() for i := range llH { llH[i] = 0 } @@ -820,7 +820,8 @@ func (b *blockEnc) genCodes() { } var llMax, ofMax, mlMax uint8 - for i, seq := range b.sequences { + for i := range b.sequences { + seq := &b.sequences[i] v := llCode(seq.litLen) seq.llCode = v llH[v]++ @@ -844,7 +845,6 @@ func (b *blockEnc) genCodes() { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) } } - b.sequences[i] = seq } maxCount := func(a []uint32) int { var max uint32 diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6cf85..b80191e4b1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 69736e8d4bb..5022e71c836 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -5,6 +5,7 @@ package zstd import ( "bytes" + "encoding/binary" "errors" "io" ) @@ -15,18 +16,50 @@ const HeaderMaxSize = 14 + 3 // Header contains information about the first frame and block within that. type Header struct { - // Window Size the window of data to keep while decoding. - // Will only be set if HasFCS is false. - WindowSize uint64 + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. 
+ // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool - // Frame content size. - // Expected size of the entire frame. - FrameContentSize uint64 + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 // Dictionary ID. // If 0, no dictionary. DictionaryID uint32 + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + // First block information. FirstBlock struct { // OK will be set if first block could be decoded. @@ -51,17 +84,9 @@ type Header struct { CompressedSize int } - // Skippable will be true if the frame is meant to be skipped. - // No other information will be populated. - Skippable bool - // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. HasCheckSum bool - - // If this is true FrameContentSize will have a valid value - HasFCS bool - - SingleSegment bool } // Decode the header from the beginning of the stream. @@ -71,39 +96,46 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
func (h *Header) Decode(in []byte) error { + *h = Header{} if len(in) < 4 { return io.ErrUnexpectedEOF } + h.HeaderSize += 4 b, in := in[:4], in[4:] if !bytes.Equal(b, frameMagic) { if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { return ErrMagicMismatch } - *h = Header{Skippable: true} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) return nil } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { return io.ErrUnexpectedEOF } - - // Clear output - *h = Header{} fhd, in := in[0], in[1:] + h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { return errors.New("reserved bit set on frame header") } - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if !h.SingleSegment { if len(in) < 1 { return io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] + h.HeaderSize++ windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) @@ -120,9 +152,7 @@ func (h *Header) Decode(in []byte) error { return io.ErrUnexpectedEOF } b, in = in[:size], in[size:] - if b == nil { - return io.ErrUnexpectedEOF - } + h.HeaderSize += int(size) switch size { case 1: h.DictionaryID = uint32(b[0]) @@ -152,9 +182,7 @@ func (h *Header) Decode(in []byte) error { return io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] - if b == nil { - return io.ErrUnexpectedEOF - } + h.HeaderSize += int(fcsSize) switch fcsSize { case 1: h.FrameContentSize = uint64(b[0]) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58b572..9fcdaac1dc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,13 @@ package zstd import ( - "errors" + "bytes" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. dicts map[uint32]dict @@ -46,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. 
- cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. + d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. 
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + + if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { return dst, ErrDecoderSizeExceeded } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + if frame.FrameContentSize < 1<<30 { + // Never preallocate more than 1 GB up front. if cap(dst)-len(dst) < int(frame.FrameContentSize) { dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) copy(dst2, dst) @@ -368,33 +393,170 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } if d.current.err != nil { // Keep error state. - return blocking + return false } + d.current.b = d.current.b[:0] + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() if blocking { - d.current.decodeOutput = <-d.current.output + d.current.decodeOutput, ok = <-d.current.output } else { select { - case d.current.decodeOutput = <-d.current.output: + case d.current.decodeOutput, ok = <-d.current.output: default: return false } } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + d.frame.crc.Write(d.current.b) + if d.current.d.Last { + d.current.err = d.frame.checkCRC() + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last } return true } +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + // Close will release all resources. 
// It is NOT possible to reuse the decoder after this. func (d *Decoder) Close() { @@ -402,10 +564,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -456,100 +618,307 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 4 go routines. +// 0: Read frames and decode blocks. +// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. +// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. +// 3: Wait for stream history, execute sequences, send stream history. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") + br := readerWrapper{r: r} + + var seqPrepare = make(chan *blockDec, d.o.concurrent) + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Prepare blocks... + go func() { + var hist history + var hasErr bool + for block := range seqPrepare { + if hasErr { + if block != nil { + seqDecode <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history") + } + hist.reset() + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + continue + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) + close(seqDecode) + }() + + // Async 2: Decode sequences... 
+ go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) } - break + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize } - if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} + seqExecute <- block + } + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 3: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) } - break decodeStream - default: } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - // All blocks have started decoding, check if there are more frames. 
- println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + seqPrepare <- dec + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err + } + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) + } + } + err = dec.err + last := dec.Last + seqPrepare <- dec + if err != nil { + break decodeStream + } + if last { + break + } } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqPrepare) + wg.Wait() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 95cc9b8b81f..fd05c9bb012 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), maxWindowSize: MaxWindowSize, } + if o.concurrent > 4 { + o.concurrent = 4 + } o.maxDecodedSize = 1 << 63 } @@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
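As a usage illustration of the option documented just above (editorial note, not part of the patch): after this change, passing 0 selects GOMAXPROCS and passing 1 disables async stream decoding entirely. A minimal sketch, assuming r is an io.Reader carrying zstd data and dst is an io.Writer:

    dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(1), zstd.WithDecoderLowmem(true))
    if err != nil {
        // handle error
    }
    defer dec.Close()
    // With concurrency 1 the stream is decoded synchronously, no goroutine pipeline.
    if _, err := io.Copy(dst, dec); err != nil {
        // handle error
    }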
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 295cd602a42..15ae8ee8077 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -108,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) { e.blk = enc } -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - func (e *fastBase) matchlen(s, t int32, src []byte) int32 { if debugAsserts { if s < 0 { @@ -131,9 +126,24 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } + a := src[s:] + b := src[t:] + b = b[:len(a)] + end := int32((len(a) >> 3) << 3) + for i := int32(0); i < end; i += 8 { + if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { + return i + int32(bits.TrailingZeros64(diff)>>3) + } + } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) + a = a[end:] + b = b[end:] + for i := range a { + if a[i] != b[i] { + return int32(i) + end + } + } + return int32(len(a)) + end } // Reset the encoding table. diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f2502629bc5..f51ab529a0b 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -6,8 +6,6 @@ package zstd import ( "fmt" - "math" - "math/bits" ) const ( @@ -87,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 7 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -136,20 +134,7 @@ encodeLoop: // Consider history as well. var seq seq var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -236,20 +221,7 @@ encodeLoop: } // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -286,20 +258,7 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. nextHash := hashLen(cv, hashLog, tableFastHashLen) @@ -375,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { // TEMPLATE const hashLog = tableBits // seems global, but would be nice to tweak. - const kSearchStrength = 8 + const kSearchStrength = 6 // nextEmit is where in src the next emitLiteral should start from. nextEmit := s @@ -418,21 +377,7 @@ encodeLoop: if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - // length := 4 + e.matchlen(s+6, repIndex+4, src) - // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) - var length int32 - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -522,21 +467,7 @@ encodeLoop: panic(fmt.Sprintf("t (%d) < 0 ", t)) } // Extend the 4-byte match as long as possible. - //l := e.matchlenNoHist(s+4, t+4, src) + 4 - // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -573,21 +504,7 @@ encodeLoop: if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. nextHash := hashLen(cv, hashLog, tableFastHashLen) @@ -731,19 +648,7 @@ encodeLoop: // Consider history as well. var seq seq var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + length = 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -831,20 +736,7 @@ encodeLoop: } // Extend the 4-byte match as long as possible. 
- //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards tMin := s - e.maxMatchOff @@ -881,20 +773,7 @@ encodeLoop: if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { // We have at least 4 byte match. // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } + l := 4 + e.matchlen(s+4, o2+4, src) // Store this, since we have it. nextHash := hashLen(cv, hashLog, tableFastHashLen) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index e6e315969b0..dcc987a7cb6 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. 
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 7d29e1d689e..44d8dbd199a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -24,6 +24,7 @@ type encoderOptions struct { allLitEntropy bool customWindow bool customALEntropy bool + customBlockSize bool lowMem bool dict *dict } @@ -33,7 +34,7 @@ func (o *encoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), crc: true, single: nil, - blockSize: 1 << 16, + blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, allLitEntropy: true, @@ -75,6 +76,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { @@ -106,6 +108,7 @@ func WithWindowSize(n int) EOption { o.customWindow = true if o.blockSize > o.windowSize { o.blockSize = o.windowSize + o.customBlockSize = true } return nil } @@ -188,10 +191,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel { return SpeedDefault case level >= 6 && level < 10: return SpeedBetterCompression - case level >= 10: + default: return SpeedBestCompression } - return SpeedDefault } // String provides a string representation of the compression level. @@ -222,6 +224,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { switch o.level { case SpeedFastest: o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f8c31..11089d22328 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -8,23 +8,17 @@ import ( "bytes" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. 
- asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error { default: fcsSize = 1 << v } - d.FrameContentSize = 0 + d.FrameContentSize = fcsUnknown if fcsSize > 0 { b, err := br.readSmall(fcsSize) if err != nil { @@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error { } d.history.windowSize = int(d.WindowSize) if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + d.history.allocFrameBuffer = d.history.windowSize * 2 + // TODO: Maybe use FrameContent size } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { @@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error { return err } - if !bytes.Equal(tmp[:], want) { + if !bytes.Equal(tmp[:], want) && !ignoreCRC { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } @@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error { return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. 
- if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history - } - block = next - } -} - // runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. 
crcStart := len(dst) var err error @@ -489,22 +336,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { err = ErrDecoderSizeExceeded break } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) err = ErrFrameSizeExceeded break } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { var n int n, err = d.crc.Write(dst[crcStart:]) if err == nil { diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index e6d3d49b39c..bb3d4fd6c31 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -379,7 +379,7 @@ func (s decSymbol) final() (int, uint8) { // This can only be used if no symbols are 0 bits. // At least tablelog bits must be available in the bit reader. func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits())) + lowBits := br.get16BitsFast(s.state.nbBits()) s.state = s.dt[s.state.newState()+lowBits] return s.state.baseline(), s.state.addBits() } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index b4757ee3f03..5442061b18d 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -62,9 +62,8 @@ func (s symbolTransform) String() string { // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *fseEncoder) Histogram() []uint32 { - return s.count[:] +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go new file mode 100644 index 00000000000..7f2210e0530 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go @@ -0,0 +1,11 @@ +//go:build ignorecrc +// +build ignorecrc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... 
+const ignoreCRC = true diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go new file mode 100644 index 00000000000..6811c68a893 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go @@ -0,0 +1,11 @@ +//go:build !ignorecrc +// +build !ignorecrc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = false diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d251..28b40153cc2 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) 
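The history.ensureBlock addition above keeps the frame buffer from growing without bound by discarding everything older than the window before the next block is appended. A simplified, standalone sketch of that trimming idea (function and variable names here are illustrative, not from the package):

    // trimToWindow keeps only the most recent windowSize bytes of b,
    // shifting them to the front so the next block can be appended in place
    // without reallocating the backing array.
    func trimToWindow(b []byte, windowSize int) []byte {
        if len(b) <= windowSize {
            return b
        }
        discard := len(b) - windowSize
        copy(b, b[discard:])
        return b[:windowSize]
    }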
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index be8db5bf796..cea17856197 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,6 +1,7 @@ // +build !appengine // +build gc // +build !purego +// +build !noasm #include "textflag.h" diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000000..4d64a17d69c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,186 @@ +// +build gc,!purego,!noasm + +#include "textflag.h" + +// Register allocation. +#define digest R1 +#define h R2 // Return value. +#define p R3 // Input pointer. +#define len R4 +#define nblocks R5 // len / 32. +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc \ + +// x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x \ + +#define mergeRound(x) \ + round0(x) \ + EOR x, h \ + MADD h, prime4, prime1, h \ + +// Update v[1-4] with 32-byte blocks. Assumes len >= 32. +#define blocksLoop() \ + LSR $5, len, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 32(p), (x1, x2) \ + round(v1, x1) \ + LDP -16(p), (x3, x4) \ + round(v2, x2) \ + SUB $1, nblocks \ + round(v3, x3) \ + round(v4, x4) \ + CBNZ nblocks, loop \ + +// The primes are repeated here to ensure that they're stored +// in a contiguous array, so we can load them with LDP. 
+DATA primes<> +0(SB)/8, $11400714785074694791 +DATA primes<> +8(SB)/8, $14029467366897019727 +DATA primes<>+16(SB)/8, $1609587929392839161 +DATA primes<>+24(SB)/8, $9650029242287828579 +DATA primes<>+32(SB)/8, $2870177450012600261 +GLOBL primes<>(SB), NOPTR+RODATA, $40 + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 + LDP b_base+0(FP), (p, len) + + LDP primes<> +0(SB), (prime1, prime2) + LDP primes<>+16(SB), (prime3, prime4) + MOVD primes<>+32(SB), prime5 + + CMP $32, len + CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } + BLO afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blocksLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(v1) + mergeRound(v2) + mergeRound(v3) + mergeRound(v4) + +afterLoop: + ADD len, h + + TBZ $4, len, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, len, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, len, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, len, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h + MUL prime1, h + +try1: + TBZ $0, len, end + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h + MUL prime1, h + +end: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +// +// Assumes len(b) >= 32. +TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 + LDP primes<>(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, len) + + blocksLoop() + + // Store updated state. 
+ STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, len + MOVD len, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go similarity index 51% rename from vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go rename to vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 0ae847f75b0..1a1fac9c261 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -1,5 +1,9 @@ -//go:build !appengine && gc && !purego -// +build !appengine,gc,!purego +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 1f52f296e71..209cb4a999c 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -1,5 +1,5 @@ -//go:build !amd64 || appengine || !gc || purego -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm package xxhash diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 1dd39e63b7e..819f1461b70 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,18 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -94,11 +99,261 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] } // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. 
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. 
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
+func (s *sequenceDecs) decodeSync(history *history) error { + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + hist := history.b[history.ignoreBuffer:] + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +406,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +431,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +484,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. 
- start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +516,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -278,7 +529,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { mlState = mlTable[mlState.newState()&maxTableMask] ofState = ofTable[ofState.newState()&maxTableMask] } else { - bits := br.getBitsFast(nBits) + bits := br.get32BitsFast(nBits) lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -291,9 +542,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } + // Check if space for literals + if len(s.literals)+len(s.out)-startSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize) + } + // Add final literals - s.out = append(s.out, s.literals...) - return nil + s.out = append(out, s.literals...) + return br.close() } // update states, at least 27 bits must be available. @@ -326,7 +582,7 @@ func (s *sequenceDecs) updateAlt(br *bitReader) { s.offsets.state.state = s.offsets.state.dt[c.newState()] return } - bits := br.getBitsFast(nBits) + bits := br.get32BitsFast(nBits) lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] @@ -457,36 +713,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 967f29b3120..ffffcbc254e 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -20,7 +20,7 @@ const ZipMethodPKWare = 20 var zipReaderPool sync.Pool -// newZipReader cannot be used since we would leak goroutines... +// newZipReader creates a pooled zip decompressor. 
func newZipReader(r io.Reader) io.ReadCloser { dec, ok := zipReaderPool.Get().(*Decoder) if ok { @@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) { r.mu.Lock() defer r.mu.Unlock() if r.dec == nil { - return 0, errors.New("Read after Close") + return 0, errors.New("read after close or EOF") } dec, err := r.dec.Read(p) - + if err == io.EOF { + err = r.dec.Reset(nil) + zipReaderPool.Put(r.dec) + r.dec = nil + } return dec, err } @@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } + return newZipReader } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index ef1d49a009c..c1c90b4a072 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -39,6 +39,9 @@ const zstdMinMatch = 3 // Reset the buffer offset when reaching this. const bufferReset = math.MaxInt32 - MaxWindowSize +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + var ( // ErrReservedBlockType is returned when a reserved block type is found. // Typically this indicates wrong or corrupted input. @@ -52,6 +55,10 @@ var ( // Typically returned on invalid input. ErrBlockTooSmall = errors.New("block too small") + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. // Typically this indicates wrong or corrupted input. ErrMagicMismatch = errors.New("invalid input: magic number mismatch") @@ -75,6 +82,10 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml deleted file mode 100644 index 7942c565ce6..00000000000 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.13.x - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) - diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index e055952b667..ca0483711c9 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,6 +1,6 @@ # go-colorable -[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) [![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) [![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 1f7806fe16b..416d1bbbf83 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 08cbd1e0fa2..766d94603ac 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -// +build !windows -// +build !appengine +//go:build !windows && !appengine +// +build !windows,!appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 41215d7fc4f..1846ad5ab41 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -1,5 +1,5 @@ -// +build windows -// +build !appengine +//go:build windows && !appengine +// +build windows,!appengine package colorable @@ -452,18 +452,22 @@ func (w *Writer) Write(data []byte) (n int, err error) { } else { er = bytes.NewReader(data) } - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go index 95f2c6be257..05d6f74bf6b 100644 --- a/vendor/github.com/mattn/go-colorable/noncolorable.go +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -18,18 +18,22 @@ func NewNonColorable(w io.Writer) io.Writer { // Write writes data on console func (w *NonColorable) Write(data []byte) (n int, err error) { er := bytes.NewReader(data) - var bw [1]byte + var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { + plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { - bw[0] = c1 - w.out.Write(bw[:]) + plaintext.WriteByte(c1) continue } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } c2, err := er.ReadByte() if err != nil { break loop @@ 
-38,7 +42,6 @@ loop: continue } - var buf bytes.Buffer for { c, err := er.ReadByte() if err != nil { @@ -47,7 +50,6 @@ loop: if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { break } - buf.Write([]byte(string(c))) } } diff --git a/vendor/github.com/miekg/pkcs11/.travis.yml b/vendor/github.com/miekg/pkcs11/.travis.yml deleted file mode 100644 index 687044d830a..00000000000 --- a/vendor/github.com/miekg/pkcs11/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: required -dist: trusty - -go: - - 1.9 - - tip - -script: - - go test -v ./... - -before_script: - - sudo apt-get update - - sudo apt-get -y install libsofthsm diff --git a/vendor/github.com/miekg/pkcs11/README.md b/vendor/github.com/miekg/pkcs11/README.md index 0a5c1b7b6e7..18a361a9903 100644 --- a/vendor/github.com/miekg/pkcs11/README.md +++ b/vendor/github.com/miekg/pkcs11/README.md @@ -1,6 +1,6 @@ -# PKCS#11 [![Build Status](https://travis-ci.org/miekg/pkcs11.png?branch=master)](https://travis-ci.org/miekg/pkcs11) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/miekg/pkcs11) +# PKCS#11 -This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom were +This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where it makes sense. It has been tested with SoftHSM. ## SoftHSM @@ -13,10 +13,10 @@ it makes sense. It has been tested with SoftHSM. softhsm --init-token --slot 0 --label test --pin 1234 ~~~ - * Then use `libsofthsm.so` as the pkcs11 module: + * Then use `libsofthsm2.so` as the pkcs11 module: ~~~ go - p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so") + p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so") ~~~ ## Examples @@ -24,7 +24,7 @@ it makes sense. It has been tested with SoftHSM. A skeleton program would look somewhat like this (yes, pkcs#11 is verbose): ~~~ go -p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so") +p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so") err := p.Initialize() if err != nil { panic(err) diff --git a/vendor/github.com/miekg/pkcs11/pkcs11.go b/vendor/github.com/miekg/pkcs11/pkcs11.go index e21d23b73e6..e1b5824ec89 100644 --- a/vendor/github.com/miekg/pkcs11/pkcs11.go +++ b/vendor/github.com/miekg/pkcs11/pkcs11.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run const_generate.go + // Package pkcs11 is a wrapper around the PKCS#11 cryptographic library. package pkcs11 @@ -14,7 +16,7 @@ package pkcs11 #cgo windows CFLAGS: -DPACKED_STRUCTURES #cgo linux LDFLAGS: -ldl #cgo darwin LDFLAGS: -ldl -#cgo openbsd LDFLAGS: -ldl +#cgo openbsd LDFLAGS: #cgo freebsd LDFLAGS: -ldl #include @@ -770,9 +772,10 @@ static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a) */ import "C" -import "strings" - -import "unsafe" +import ( + "strings" + "unsafe" +) // Ctx contains the current pkcs11 context. type Ctx struct { diff --git a/vendor/github.com/miekg/pkcs11/release.go b/vendor/github.com/miekg/pkcs11/release.go index 4380f374d28..d8b99f147ea 100644 --- a/vendor/github.com/miekg/pkcs11/release.go +++ b/vendor/github.com/miekg/pkcs11/release.go @@ -1,3 +1,4 @@ +//go:build release // +build release package pkcs11 @@ -5,7 +6,7 @@ package pkcs11 import "fmt" // Release is current version of the pkcs11 library. -var Release = R{1, 0, 3} +var Release = R{1, 1, 1} // R holds the version of this library. 
type R struct { diff --git a/vendor/github.com/miekg/pkcs11/types.go b/vendor/github.com/miekg/pkcs11/types.go index 970db9061b6..60eadcb71bb 100644 --- a/vendor/github.com/miekg/pkcs11/types.go +++ b/vendor/github.com/miekg/pkcs11/types.go @@ -182,8 +182,20 @@ func NewAttribute(typ uint, x interface{}) *Attribute { } case int: a.Value = uintToBytes(uint64(v)) + case int16: + a.Value = uintToBytes(uint64(v)) + case int32: + a.Value = uintToBytes(uint64(v)) + case int64: + a.Value = uintToBytes(uint64(v)) case uint: a.Value = uintToBytes(uint64(v)) + case uint16: + a.Value = uintToBytes(uint64(v)) + case uint32: + a.Value = uintToBytes(uint64(v)) + case uint64: + a.Value = uintToBytes(uint64(v)) case string: a.Value = []byte(v) case []byte: diff --git a/vendor/github.com/miekg/pkcs11/const.go b/vendor/github.com/miekg/pkcs11/zconst.go similarity index 85% rename from vendor/github.com/miekg/pkcs11/const.go rename to vendor/github.com/miekg/pkcs11/zconst.go index 40885614616..41df5cfcf0c 100644 --- a/vendor/github.com/miekg/pkcs11/const.go +++ b/vendor/github.com/miekg/pkcs11/zconst.go @@ -2,48 +2,18 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package pkcs11 - -const ( - CKU_SO uint = 0 - CKU_USER uint = 1 - CKU_CONTEXT_SPECIFIC uint = 2 -) - -const ( - CKO_DATA uint = 0x00000000 - CKO_CERTIFICATE uint = 0x00000001 - CKO_PUBLIC_KEY uint = 0x00000002 - CKO_PRIVATE_KEY uint = 0x00000003 - CKO_SECRET_KEY uint = 0x00000004 - CKO_HW_FEATURE uint = 0x00000005 - CKO_DOMAIN_PARAMETERS uint = 0x00000006 - CKO_MECHANISM uint = 0x00000007 - CKO_OTP_KEY uint = 0x00000008 - CKO_VENDOR_DEFINED uint = 0x80000000 -) - -const ( - CKG_MGF1_SHA1 uint = 0x00000001 - CKG_MGF1_SHA224 uint = 0x00000005 - CKG_MGF1_SHA256 uint = 0x00000002 - CKG_MGF1_SHA384 uint = 0x00000003 - CKG_MGF1_SHA512 uint = 0x00000004 - CKG_MGF1_SHA3_224 uint = 0x00000006 - CKG_MGF1_SHA3_256 uint = 0x00000007 - CKG_MGF1_SHA3_384 uint = 0x00000008 - CKG_MGF1_SHA3_512 uint = 0x00000009 -) - -const ( - CKZ_DATA_SPECIFIED uint = 0x00000001 -) +// Code generated by "go run const_generate.go"; DO NOT EDIT. -// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g' +package pkcs11 -// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and -// mechanism (CKM_) constants as defined in PKCS#11. 
const ( + CK_TRUE = 1 + CK_FALSE = 0 + CK_UNAVAILABLE_INFORMATION = ^uint(0) + CK_EFFECTIVELY_INFINITE = 0 + CK_INVALID_HANDLE = 0 + CKN_SURRENDER = 0 + CKN_OTP_CHANGED = 1 CKF_TOKEN_PRESENT = 0x00000001 CKF_REMOVABLE_DEVICE = 0x00000002 CKF_HW_SLOT = 0x00000004 @@ -66,12 +36,34 @@ const ( CKF_SO_PIN_LOCKED = 0x00400000 CKF_SO_PIN_TO_BE_CHANGED = 0x00800000 CKF_ERROR_STATE = 0x01000000 + CKU_SO = 0 + CKU_USER = 1 + CKU_CONTEXT_SPECIFIC = 2 + CKS_RO_PUBLIC_SESSION = 0 + CKS_RO_USER_FUNCTIONS = 1 + CKS_RW_PUBLIC_SESSION = 2 + CKS_RW_USER_FUNCTIONS = 3 + CKS_RW_SO_FUNCTIONS = 4 CKF_RW_SESSION = 0x00000002 CKF_SERIAL_SESSION = 0x00000004 + CKO_DATA = 0x00000000 + CKO_CERTIFICATE = 0x00000001 + CKO_PUBLIC_KEY = 0x00000002 + CKO_PRIVATE_KEY = 0x00000003 + CKO_SECRET_KEY = 0x00000004 + CKO_HW_FEATURE = 0x00000005 + CKO_DOMAIN_PARAMETERS = 0x00000006 + CKO_MECHANISM = 0x00000007 + CKO_OTP_KEY = 0x00000008 + CKO_VENDOR_DEFINED = 0x80000000 + CKH_MONOTONIC_COUNTER = 0x00000001 + CKH_CLOCK = 0x00000002 + CKH_USER_INTERFACE = 0x00000003 + CKH_VENDOR_DEFINED = 0x80000000 CKK_RSA = 0x00000000 CKK_DSA = 0x00000001 CKK_DH = 0x00000002 - CKK_ECDSA = 0x00000003 + CKK_ECDSA = 0x00000003 // Deprecated CKK_EC = 0x00000003 CKK_X9_42_DH = 0x00000004 CKK_KEA = 0x00000005 @@ -83,7 +75,7 @@ const ( CKK_DES3 = 0x00000015 CKK_CAST = 0x00000016 CKK_CAST3 = 0x00000017 - CKK_CAST5 = 0x00000018 + CKK_CAST5 = 0x00000018 // Deprecated CKK_CAST128 = 0x00000018 CKK_RC5 = 0x00000019 CKK_IDEA = 0x0000001A @@ -99,14 +91,14 @@ const ( CKK_ACTI = 0x00000024 CKK_CAMELLIA = 0x00000025 CKK_ARIA = 0x00000026 - CKK_SHA512_224_HMAC = 0x00000027 - CKK_SHA512_256_HMAC = 0x00000028 - CKK_SHA512_T_HMAC = 0x00000029 + CKK_MD5_HMAC = 0x00000027 CKK_SHA_1_HMAC = 0x00000028 - CKK_SHA224_HMAC = 0x0000002E + CKK_RIPEMD128_HMAC = 0x00000029 + CKK_RIPEMD160_HMAC = 0x0000002A CKK_SHA256_HMAC = 0x0000002B CKK_SHA384_HMAC = 0x0000002C CKK_SHA512_HMAC = 0x0000002D + CKK_SHA224_HMAC = 0x0000002E CKK_SEED = 0x0000002F CKK_GOSTR3410 = 0x00000030 CKK_GOSTR3411 = 0x00000031 @@ -116,11 +108,26 @@ const ( CKK_SHA3_384_HMAC = 0x00000035 CKK_SHA3_512_HMAC = 0x00000036 CKK_VENDOR_DEFINED = 0x80000000 + CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0 + CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1 + CK_CERTIFICATE_CATEGORY_AUTHORITY = 2 + CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3 + CK_SECURITY_DOMAIN_UNSPECIFIED = 0 + CK_SECURITY_DOMAIN_MANUFACTURER = 1 + CK_SECURITY_DOMAIN_OPERATOR = 2 + CK_SECURITY_DOMAIN_THIRD_PARTY = 3 CKC_X_509 = 0x00000000 CKC_X_509_ATTR_CERT = 0x00000001 CKC_WTLS = 0x00000002 CKC_VENDOR_DEFINED = 0x80000000 CKF_ARRAY_ATTRIBUTE = 0x40000000 + CK_OTP_FORMAT_DECIMAL = 0 + CK_OTP_FORMAT_HEXADECIMAL = 1 + CK_OTP_FORMAT_ALPHANUMERIC = 2 + CK_OTP_FORMAT_BINARY = 3 + CK_OTP_PARAM_IGNORED = 0 + CK_OTP_PARAM_OPTIONAL = 1 + CK_OTP_PARAM_MANDATORY = 2 CKA_CLASS = 0x00000000 CKA_TOKEN = 0x00000001 CKA_PRIVATE = 0x00000002 @@ -183,15 +190,16 @@ const ( CKA_MODIFIABLE = 0x00000170 CKA_COPYABLE = 0x00000171 CKA_DESTROYABLE = 0x00000172 - CKA_ECDSA_PARAMS = 0x00000180 + CKA_ECDSA_PARAMS = 0x00000180 // Deprecated CKA_EC_PARAMS = 0x00000180 CKA_EC_POINT = 0x00000181 - CKA_SECONDARY_AUTH = 0x00000200 - CKA_AUTH_PIN_FLAGS = 0x00000201 + CKA_SECONDARY_AUTH = 0x00000200 // Deprecated + CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated CKA_ALWAYS_AUTHENTICATE = 0x00000202 CKA_WRAP_WITH_TRUSTED = 0x00000210 - CKA_WRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000211 - CKA_UNWRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000212 + CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211) + 
CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212) + CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213) CKA_OTP_FORMAT = 0x00000220 CKA_OTP_LENGTH = 0x00000221 CKA_OTP_TIME_INTERVAL = 0x00000222 @@ -226,7 +234,7 @@ const ( CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501 CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502 CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503 - CKA_ALLOWED_MECHANISMS = CKF_ARRAY_ATTRIBUTE | 0x00000600 + CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600) CKA_VENDOR_DEFINED = 0x80000000 CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000 CKM_RSA_PKCS = 0x00000001 @@ -246,11 +254,10 @@ const ( CKM_DSA_KEY_PAIR_GEN = 0x00000010 CKM_DSA = 0x00000011 CKM_DSA_SHA1 = 0x00000012 - CKM_DSA_FIPS_G_GEN = 0x00000013 - CKM_DSA_SHA224 = 0x00000014 - CKM_DSA_SHA256 = 0x00000015 - CKM_DSA_SHA384 = 0x00000016 - CKM_DSA_SHA512 = 0x00000017 + CKM_DSA_SHA224 = 0x00000013 + CKM_DSA_SHA256 = 0x00000014 + CKM_DSA_SHA384 = 0x00000015 + CKM_DSA_SHA512 = 0x00000016 CKM_DSA_SHA3_224 = 0x00000018 CKM_DSA_SHA3_256 = 0x00000019 CKM_DSA_SHA3_384 = 0x0000001A @@ -387,13 +394,13 @@ const ( CKM_CAST128_KEY_GEN = 0x00000320 CKM_CAST5_ECB = 0x00000321 CKM_CAST128_ECB = 0x00000321 - CKM_CAST5_CBC = 0x00000322 + CKM_CAST5_CBC = 0x00000322 // Deprecated CKM_CAST128_CBC = 0x00000322 - CKM_CAST5_MAC = 0x00000323 + CKM_CAST5_MAC = 0x00000323 // Deprecated CKM_CAST128_MAC = 0x00000323 - CKM_CAST5_MAC_GENERAL = 0x00000324 + CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated CKM_CAST128_MAC_GENERAL = 0x00000324 - CKM_CAST5_CBC_PAD = 0x00000325 + CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated CKM_CAST128_CBC_PAD = 0x00000325 CKM_RC5_KEY_GEN = 0x00000330 CKM_RC5_ECB = 0x00000331 @@ -441,9 +448,9 @@ const ( CKM_PBE_MD5_DES_CBC = 0x000003A1 CKM_PBE_MD5_CAST_CBC = 0x000003A2 CKM_PBE_MD5_CAST3_CBC = 0x000003A3 - CKM_PBE_MD5_CAST5_CBC = 0x000003A4 + CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated CKM_PBE_MD5_CAST128_CBC = 0x000003A4 - CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 + CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated CKM_PBE_SHA1_CAST128_CBC = 0x000003A5 CKM_PBE_SHA1_RC4_128 = 0x000003A6 CKM_PBE_SHA1_RC4_40 = 0x000003A7 @@ -522,7 +529,7 @@ const ( CKM_BATON_COUNTER = 0x00001034 CKM_BATON_SHUFFLE = 0x00001035 CKM_BATON_WRAP = 0x00001036 - CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 + CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated CKM_EC_KEY_PAIR_GEN = 0x00001040 CKM_ECDSA = 0x00001041 CKM_ECDSA_SHA1 = 0x00001042 @@ -551,9 +558,9 @@ const ( CKM_AES_CTR = 0x00001086 CKM_AES_GCM = 0x00001087 CKM_AES_CCM = 0x00001088 - CKM_AES_CMAC_GENERAL = 0x00001089 + CKM_AES_CTS = 0x00001089 CKM_AES_CMAC = 0x0000108A - CKM_AES_CTS = 0x0000108B + CKM_AES_CMAC_GENERAL = 0x0000108B CKM_AES_XCBC_MAC = 0x0000108C CKM_AES_XCBC_MAC_96 = 0x0000108D CKM_AES_GMAC = 0x0000108E @@ -704,33 +711,56 @@ const ( CKR_MUTEX_NOT_LOCKED = 0x000001A1 CKR_NEW_PIN_MODE = 0x000001B0 CKR_NEXT_OTP = 0x000001B1 - CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0 - CKR_FIPS_SELF_TEST_FAILED = 0x000001C1 - CKR_LIBRARY_LOAD_FAILED = 0x000001C2 - CKR_PIN_TOO_WEAK = 0x000001C3 - CKR_PUBLIC_KEY_INVALID = 0x000001C4 + CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5 + CKR_FIPS_SELF_TEST_FAILED = 0x000001B6 + CKR_LIBRARY_LOAD_FAILED = 0x000001B7 + CKR_PIN_TOO_WEAK = 0x000001B8 + CKR_PUBLIC_KEY_INVALID = 0x000001B9 CKR_FUNCTION_REJECTED = 0x00000200 CKR_VENDOR_DEFINED = 0x80000000 CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001 CKF_OS_LOCKING_OK = 0x00000002 CKF_DONT_BLOCK = 1 + CKG_MGF1_SHA1 = 0x00000001 + CKG_MGF1_SHA256 = 0x00000002 + CKG_MGF1_SHA384 = 0x00000003 + CKG_MGF1_SHA512 = 0x00000004 + 
CKG_MGF1_SHA224 = 0x00000005 + CKZ_DATA_SPECIFIED = 0x00000001 + CKD_NULL = 0x00000001 + CKD_SHA1_KDF = 0x00000002 + CKD_SHA1_KDF_ASN1 = 0x00000003 + CKD_SHA1_KDF_CONCATENATE = 0x00000004 + CKD_SHA224_KDF = 0x00000005 + CKD_SHA256_KDF = 0x00000006 + CKD_SHA384_KDF = 0x00000007 + CKD_SHA512_KDF = 0x00000008 + CKD_CPDIVERSIFY_KDF = 0x00000009 + CKD_SHA3_224_KDF = 0x0000000A + CKD_SHA3_256_KDF = 0x0000000B + CKD_SHA3_384_KDF = 0x0000000C + CKD_SHA3_512_KDF = 0x0000000D + CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001 + CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002 + CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003 + CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004 + CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005 + CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006 + CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007 + CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008 + CKZ_SALT_SPECIFIED = 0x00000001 + CK_OTP_VALUE = 0 + CK_OTP_PIN = 1 + CK_OTP_CHALLENGE = 2 + CK_OTP_TIME = 3 + CK_OTP_COUNTER = 4 + CK_OTP_FLAGS = 5 + CK_OTP_OUTPUT_LENGTH = 6 + CK_OTP_OUTPUT_FORMAT = 7 CKF_NEXT_OTP = 0x00000001 CKF_EXCLUDE_TIME = 0x00000002 CKF_EXCLUDE_COUNTER = 0x00000004 CKF_EXCLUDE_CHALLENGE = 0x00000008 CKF_EXCLUDE_PIN = 0x00000010 CKF_USER_FRIENDLY_OTP = 0x00000020 - CKD_NULL = 0x00000001 - CKD_SHA1_KDF = 0x00000002 -) - -// Special return values defined in PKCS#11 v2.40 section 3.2. -const ( - // CK_EFFECTIVELY_INFINITE may be returned in the CK_TOKEN_INFO fields ulMaxSessionCount and ulMaxRwSessionCount. - // It indicates there is no practical limit on the number of sessions. - CK_EFFECTIVELY_INFINITE = 0 - - // CK_UNAVAILABLE_INFORMATION may be returned for several fields within CK_TOKEN_INFO. It indicates - // the token is unable or unwilling to provide the requested information. - CK_UNAVAILABLE_INFORMATION = ^uint(0) ) diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 9fe803a5e90..38a099162ca 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,7 @@ +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + ## 1.4.2 * Custom name matchers to support any sort of casing, formatting, etc. 
for diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index dcee0f2d634..6b81b00679e 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -684,16 +684,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e } case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": jn := data.(json.Number) - i, err := jn.Int64() + i, err := strconv.ParseUint(string(jn), 0, 64) if err != nil { return fmt.Errorf( "error decoding json.Number into %s: %s", name, err) } - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) + val.SetUint(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go index bc9f6b2ad3d..bf221e687f1 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go @@ -7,6 +7,34 @@ import ( "golang.org/x/sys/unix" ) +// MountedFast is a method of detecting a mount point without reading +// mountinfo from procfs. A caller can only trust the result if no error +// and sure == true are returned. Otherwise, other methods (e.g. parsing +// /proc/mounts) have to be used. If unsure, use Mounted instead (which +// uses MountedFast, but falls back to parsing mountinfo if needed). +// +// If a non-existent path is specified, an appropriate error is returned. +// In case the caller is not interested in this particular error, it should +// be handled separately using e.g. errors.Is(err, os.ErrNotExist). +// +// This function is only available on Linux. When available (since kernel +// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise, +// the implementation falls back to using stat(2), which can reliably detect +// normal (but not bind) mounts. +func MountedFast(path string) (mounted, sure bool, err error) { + // Root is always mounted. + if path == string(os.PathSeparator) { + return true, true, nil + } + + path, err = normalizePath(path) + if err != nil { + return false, false, err + } + mounted, sure, err = mountedFast(path) + return +} + // mountedByOpenat2 is a method of detecting a mount that works for all kinds // of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel. func mountedByOpenat2(path string) (bool, error) { @@ -16,9 +44,6 @@ func mountedByOpenat2(path string) (bool, error) { Flags: unix.O_PATH | unix.O_CLOEXEC, }) if err != nil { - if err == unix.ENOENT { // not a mount - return false, nil - } return false, &os.PathError{Op: "openat2", Path: dir, Err: err} } fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{ @@ -26,33 +51,51 @@ func mountedByOpenat2(path string) (bool, error) { Resolve: unix.RESOLVE_NO_XDEV, }) _ = unix.Close(dirfd) - switch err { + switch err { //nolint:errorlint // unix errors are bare case nil: // definitely not a mount _ = unix.Close(fd) return false, nil case unix.EXDEV: // definitely a mount return true, nil - case unix.ENOENT: // not a mount - return false, nil } // not sure return false, &os.PathError{Op: "openat2", Path: path, Err: err} } -func mounted(path string) (bool, error) { +// mountedFast is similar to MountedFast, except it expects a normalized path. 
+func mountedFast(path string) (mounted, sure bool, err error) { + // Root is always mounted. + if path == string(os.PathSeparator) { + return true, true, nil + } + // Try a fast path, using openat2() with RESOLVE_NO_XDEV. - mounted, err := mountedByOpenat2(path) + mounted, err = mountedByOpenat2(path) if err == nil { - return mounted, nil + return mounted, true, nil } + // Another fast path: compare st.st_dev fields. mounted, err = mountedByStat(path) // This does not work for bind mounts, so false negative // is possible, therefore only trust if return is true. if mounted && err == nil { + return true, true, nil + } + + return +} + +func mounted(path string) (bool, error) { + path, err := normalizePath(path) + if err != nil { + return false, err + } + mounted, sure, err := mountedFast(path) + if sure && err == nil { return mounted, nil } - // Fallback to parsing mountinfo + // Fallback to parsing mountinfo. return mountedByMountinfo(path) } diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go index efb03978b1e..45ddad236f3 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -1,9 +1,9 @@ -// +build linux freebsd,cgo openbsd,cgo +//go:build linux || (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo) +// +build linux freebsd,cgo openbsd,cgo darwin,cgo package mountinfo import ( - "errors" "fmt" "os" "path/filepath" @@ -15,10 +15,6 @@ func mountedByStat(path string) (bool, error) { var st unix.Stat_t if err := unix.Lstat(path, &st); err != nil { - if err == unix.ENOENT { - // Treat ENOENT as "not mounted". - return false, nil - } return false, &os.PathError{Op: "stat", Path: path, Err: err} } dev := st.Dev @@ -49,14 +45,6 @@ func normalizePath(path string) (realPath string, err error) { } func mountedByMountinfo(path string) (bool, error) { - path, err := normalizePath(path) - if err != nil { - if errors.Is(err, unix.ENOENT) { - // treat ENOENT as "not mounted" - return false, nil - } - return false, err - } entries, err := GetMounts(SingleEntryFilter(path)) if err != nil { return false, err diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go index 403a893310b..c7e5cb42aca 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go @@ -10,11 +10,12 @@ func GetMounts(f FilterFunc) ([]*Info, error) { return parseMountTable(f) } -// Mounted determines if a specified path is a mount point. +// Mounted determines if a specified path is a mount point. In case of any +// error, false (and an error) is returned. // -// The argument must be an absolute path, with all symlinks resolved, and clean. -// One way to ensure it is to process the path using filepath.Abs followed by -// filepath.EvalSymlinks before calling this function. +// If a non-existent path is specified, an appropriate error is returned. +// In case the caller is not interested in this particular error, it should +// be handled separately using e.g. errors.Is(err, os.ErrNotExist). 
func Mounted(path string) (bool, error) { // root is always mounted if path == string(os.PathSeparator) { diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go index b1c12d02b51..d5513a26d2f 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go @@ -1,4 +1,5 @@ -// +build freebsd,cgo openbsd,cgo +//go:build (freebsd && cgo) || (openbsd && cgo) || (darwin && cgo) +// +build freebsd,cgo openbsd,cgo darwin,cgo package mountinfo @@ -21,7 +22,7 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) if count == 0 { - return nil, fmt.Errorf("Failed to call getmntinfo") + return nil, fmt.Errorf("failed to call getmntinfo") } var entries []C.struct_statfs @@ -55,6 +56,10 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { } func mounted(path string) (bool, error) { + path, err := normalizePath(path) + if err != nil { + return false, err + } // Fast path: compare st.st_dev fields. // This should always work for FreeBSD and OpenBSD. mounted, err := mountedByStat(path) diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go index f09a70fa0d9..59332b07bf4 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go @@ -52,7 +52,7 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { numFields := len(fields) if numFields < 10 { // should be at least 10 fields - return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + return nil, fmt.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields) } // separator field @@ -67,7 +67,7 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { for fields[sepIdx] != "-" { sepIdx-- if sepIdx == 5 { - return nil, fmt.Errorf("Parsing '%s' failed: missing - separator", text) + return nil, fmt.Errorf("parsing '%s' failed: missing - separator", text) } } @@ -75,46 +75,39 @@ func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { p.Mountpoint, err = unescape(fields[4]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: mount point: %w", fields[4], err) + return nil, fmt.Errorf("parsing '%s' failed: mount point: %w", fields[4], err) } p.FSType, err = unescape(fields[sepIdx+1]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: fstype: %w", fields[sepIdx+1], err) + return nil, fmt.Errorf("parsing '%s' failed: fstype: %w", fields[sepIdx+1], err) } p.Source, err = unescape(fields[sepIdx+2]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: source: %w", fields[sepIdx+2], err) + return nil, fmt.Errorf("parsing '%s' failed: source: %w", fields[sepIdx+2], err) } p.VFSOptions = fields[sepIdx+3] // ignore any numbers parsing errors, as there should not be any p.ID, _ = strconv.Atoi(fields[0]) p.Parent, _ = strconv.Atoi(fields[1]) - mm := strings.Split(fields[2], ":") + mm := strings.SplitN(fields[2], ":", 3) if len(mm) != 2 { - return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) + return nil, fmt.Errorf("parsing '%s' failed: unexpected major:minor pair %s", text, mm) } p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) p.Root, err = unescape(fields[3]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: root: %w", fields[3], err) + 
return nil, fmt.Errorf("parsing '%s' failed: root: %w", fields[3], err) } p.Options = fields[5] // zero or more optional fields - switch { - case sepIdx == 6: - // zero, do nothing - case sepIdx == 7: - p.Optional = fields[6] - default: - p.Optional = strings.Join(fields[6:sepIdx-1], " ") - } + p.Optional = strings.Join(fields[6:sepIdx], " ") - // Run the filter after parsing all of the fields. + // Run the filter after parsing all fields. var skip, stop bool if filter != nil { skip, stop = filter(p) diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go index d33ebca0968..95769a76dad 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go @@ -1,4 +1,5 @@ -// +build !windows,!linux,!freebsd,!openbsd freebsd,!cgo openbsd,!cgo +//go:build (!windows && !linux && !freebsd && !openbsd && !darwin) || (freebsd && !cgo) || (openbsd && !cgo) || (darwin && !cgo) +// +build !windows,!linux,!freebsd,!openbsd,!darwin freebsd,!cgo openbsd,!cgo darwin,!cgo package mountinfo diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index 4e6c4b23623..82da6c6a898 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -21,6 +21,9 @@ import "github.com/opencontainers/image-spec/specs-go" type Index struct { specs.Versioned + // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + // Manifests references platform specific manifests. Manifests []Descriptor `json:"manifests"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 7ff32c40ba3..d72d15ce4bb 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -20,6 +20,9 @@ import "github.com/opencontainers/image-spec/specs-go" type Manifest struct { specs.Versioned + // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. Config Descriptor `json:"config"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 58f1095ab08..31f99cf645c 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -22,7 +22,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 0 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 + VersionPatch = 2 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "-dev" diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go index 744d4e57054..8b1483c7de7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_linux.go @@ -3,7 +3,6 @@ package apparmor import ( "errors" "fmt" - "io/ioutil" "os" "sync" @@ -19,7 +18,7 @@ var ( func isEnabled() bool { checkAppArmor.Do(func() { if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil { - buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + buf, err := os.ReadFile("/sys/module/apparmor/parameters/enabled") appArmorEnabled = err == nil && len(buf) > 1 && buf[0] == 'Y' } }) @@ -52,7 +51,7 @@ func setProcAttr(attr, value string) error { // changeOnExec reimplements aa_change_onexec from libapparmor in Go func changeOnExec(name string) error { if err := setProcAttr("exec", "exec "+name); err != nil { - return fmt.Errorf("apparmor failed to apply profile: %s", err) + return fmt.Errorf("apparmor failed to apply profile: %w", err) } return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go index 1adadafec8e..684248f2559 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package apparmor diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go index 68a346ca531..ba2b2266c9d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go @@ -1,5 +1,3 @@ -// +build linux - package cgroups import ( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go deleted file mode 100644 index 278d507e288..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package cgroups diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go index c08477cbb35..6c61ee4c033 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/devices/devices_emulator.go @@ -1,5 +1,3 @@ -// +build linux - // SPDX-License-Identifier: Apache-2.0 /* * Copyright (C) 2020 Aleksa Sarai @@ -22,14 +20,13 @@ package devices import ( "bufio" + "fmt" "io" - "regexp" "sort" "strconv" + "strings" "github.com/opencontainers/runc/libcontainer/devices" - - "github.com/pkg/errors" ) // deviceMeta is a Rule without the Allow or Permissions fields, and no @@ -79,19 +76,21 @@ func (e *Emulator) IsAllowAll() bool { return e.IsBlacklist() && len(e.rules) == 0 } -var devicesListRegexp = regexp.MustCompile(`^([abc])\s+(\d+|\*):(\d+|\*)\s+([rwm]+)$`) - func parseLine(line string) (*deviceRule, error) { - matches := 
devicesListRegexp.FindStringSubmatch(line) - if matches == nil { - return nil, errors.Errorf("line doesn't match devices.list format") + // Input: node major:minor perms. + fields := strings.FieldsFunc(line, func(r rune) bool { + return r == ' ' || r == ':' + }) + if len(fields) != 4 { + return nil, fmt.Errorf("malformed devices.list rule %s", line) } + var ( rule deviceRule - node = matches[1] - major = matches[2] - minor = matches[3] - perms = matches[4] + node = fields[0] + major = fields[1] + minor = fields[2] + perms = fields[3] ) // Parse the node type. @@ -107,8 +106,7 @@ func parseLine(line string) (*deviceRule, error) { case "c": rule.meta.node = devices.CharDevice default: - // Should never happen! - return nil, errors.Errorf("unknown device type %q", node) + return nil, fmt.Errorf("unknown device type %q", node) } // Parse the major number. @@ -117,7 +115,7 @@ func parseLine(line string) (*deviceRule, error) { } else { val, err := strconv.ParseUint(major, 10, 32) if err != nil { - return nil, errors.Wrap(err, "parse major number") + return nil, fmt.Errorf("invalid major number: %w", err) } rule.meta.major = int64(val) } @@ -128,7 +126,7 @@ func parseLine(line string) (*deviceRule, error) { } else { val, err := strconv.ParseUint(minor, 10, 32) if err != nil { - return nil, errors.Wrap(err, "parse minor number") + return nil, fmt.Errorf("invalid minor number: %w", err) } rule.meta.minor = int64(val) } @@ -136,13 +134,12 @@ func parseLine(line string) (*deviceRule, error) { // Parse the access permissions. rule.perms = devices.Permissions(perms) if !rule.perms.IsValid() || rule.perms.IsEmpty() { - // Should never happen! - return nil, errors.Errorf("parse access mode: contained unknown modes or is empty: %q", perms) + return nil, fmt.Errorf("parse access mode: contained unknown modes or is empty: %q", perms) } return &rule, nil } -func (e *Emulator) addRule(rule deviceRule) error { +func (e *Emulator) addRule(rule deviceRule) error { //nolint:unparam if e.rules == nil { e.rules = make(map[deviceMeta]devices.Permissions) } @@ -180,7 +177,7 @@ func (e *Emulator) rmRule(rule deviceRule) error { // Only give an error if the set of permissions overlap. 
partialPerms := e.rules[partialMeta] if !partialPerms.Intersection(rule.perms).IsEmpty() { - return errors.Errorf("requested rule [%v %v] not supported by devices cgroupv1 (cannot punch hole in existing wildcard rule [%v %v])", rule.meta, rule.perms, partialMeta, partialPerms) + return fmt.Errorf("requested rule [%v %v] not supported by devices cgroupv1 (cannot punch hole in existing wildcard rule [%v %v])", rule.meta, rule.perms, partialMeta, partialPerms) } } @@ -212,9 +209,9 @@ func (e *Emulator) allow(rule *deviceRule) error { var err error if e.defaultAllow { - err = errors.Wrap(e.rmRule(*rule), "remove 'deny' exception") + err = wrapErr(e.rmRule(*rule), "unable to remove 'deny' exception") } else { - err = errors.Wrap(e.addRule(*rule), "add 'allow' exception") + err = wrapErr(e.addRule(*rule), "unable to add 'allow' exception") } return err } @@ -232,16 +229,16 @@ func (e *Emulator) deny(rule *deviceRule) error { var err error if e.defaultAllow { - err = errors.Wrap(e.addRule(*rule), "add 'deny' exception") + err = wrapErr(e.addRule(*rule), "unable to add 'deny' exception") } else { - err = errors.Wrap(e.rmRule(*rule), "remove 'allow' exception") + err = wrapErr(e.rmRule(*rule), "unable to remove 'allow' exception") } return err } func (e *Emulator) Apply(rule devices.Rule) error { if !rule.Type.CanCgroup() { - return errors.Errorf("cannot add rule [%#v] with non-cgroup type %q", rule, rule.Type) + return fmt.Errorf("cannot add rule [%#v] with non-cgroup type %q", rule, rule.Type) } innerRule := &deviceRule{ @@ -283,17 +280,17 @@ func EmulatorFromList(list io.Reader) (*Emulator, error) { line := s.Text() deviceRule, err := parseLine(line) if err != nil { - return nil, errors.Wrapf(err, "parsing line %q", line) + return nil, fmt.Errorf("error parsing line %q: %w", line, err) } // "devices.list" is an allow list. Note that this means that in // black-list mode, we have no idea what rules are in play. As a // result, we need to be very careful in Transition(). if err := e.allow(deviceRule); err != nil { - return nil, errors.Wrapf(err, "adding devices.list rule") + return nil, fmt.Errorf("error adding devices.list rule: %w", err) } } if err := s.Err(); err != nil { - return nil, errors.Wrap(err, "reading devices.list lines") + return nil, fmt.Errorf("error reading devices.list lines: %w", err) } return e, nil } @@ -305,7 +302,7 @@ func EmulatorFromList(list io.Reader) (*Emulator, error) { // necessary. // // This function is the sole reason for all of Emulator -- to allow us -// to figure out how to update a containers' cgroups without causing spurrious +// to figure out how to update a containers' cgroups without causing spurious // device errors (if possible). 
func (source *Emulator) Transition(target *Emulator) ([]*devices.Rule, error) { var transitionRules []*devices.Rule @@ -380,3 +377,10 @@ func (e *Emulator) Rules() ([]*devices.Rule, error) { defaultCgroup := &Emulator{defaultAllow: false} return defaultCgroup.Transition(e) } + +func wrapErr(err error, text string) error { + if err == nil { + return nil + } + return fmt.Errorf(text+": %w", err) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go index 96cbca3916e..4e69b35bcda 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter/devicefilter.go @@ -7,13 +7,14 @@ package devicefilter import ( + "errors" + "fmt" "math" "strconv" "github.com/cilium/ebpf/asm" devicesemulator "github.com/opencontainers/runc/libcontainer/cgroups/devices" "github.com/opencontainers/runc/libcontainer/devices" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -51,21 +52,20 @@ func DeviceFilter(rules []*devices.Rule) (asm.Instructions, string, error) { // only be one (at most) at the very start to instruct cgroupv1 to // go into allow-list mode. However we do double-check this here. if idx != 0 || rule.Allow != emu.IsBlacklist() { - return nil, "", errors.Errorf("[internal error] emulated cgroupv2 devices ruleset had bad wildcard at idx %v (%s)", idx, rule.CgroupString()) + return nil, "", fmt.Errorf("[internal error] emulated cgroupv2 devices ruleset had bad wildcard at idx %v (%s)", idx, rule.CgroupString()) } continue } if rule.Allow == p.defaultAllow { // There should be no rules which have an action equal to the // default action, the emulator removes those. - return nil, "", errors.Errorf("[internal error] emulated cgroupv2 devices ruleset had no-op rule at idx %v (%s)", idx, rule.CgroupString()) + return nil, "", fmt.Errorf("[internal error] emulated cgroupv2 devices ruleset had no-op rule at idx %v (%s)", idx, rule.CgroupString()) } if err := p.appendRule(rule); err != nil { return nil, "", err } } - insts, err := p.finalize() - return insts, license, err + return p.finalize(), license, nil } type program struct { @@ -118,13 +118,13 @@ func (p *program) appendRule(rule *devices.Rule) error { bpfType = int32(unix.BPF_DEVCG_DEV_BLOCK) default: // We do not permit 'a', nor any other types we don't know about. - return errors.Errorf("invalid type %q", string(rule.Type)) + return fmt.Errorf("invalid type %q", string(rule.Type)) } if rule.Major > math.MaxUint32 { - return errors.Errorf("invalid major %d", rule.Major) + return fmt.Errorf("invalid major %d", rule.Major) } if rule.Minor > math.MaxUint32 { - return errors.Errorf("invalid minor %d", rule.Major) + return fmt.Errorf("invalid minor %d", rule.Major) } hasMajor := rule.Major >= 0 // if not specified in OCI json, major is set to -1 hasMinor := rule.Minor >= 0 @@ -138,7 +138,7 @@ func (p *program) appendRule(rule *devices.Rule) error { case 'm': bpfAccess |= unix.BPF_DEVCG_ACC_MKNOD default: - return errors.Errorf("unknown device access %v", r) + return fmt.Errorf("unknown device access %v", r) } } // If the access is rwm, skip the check. 
@@ -180,7 +180,7 @@ func (p *program) appendRule(rule *devices.Rule) error { return nil } -func (p *program) finalize() (asm.Instructions, error) { +func (p *program) finalize() asm.Instructions { var v int32 if p.defaultAllow { v = 1 @@ -192,7 +192,7 @@ func (p *program) finalize() (asm.Instructions, error) { asm.Return(), ) p.blockID = -1 - return p.insts, nil + return p.insts } func acceptBlock(accept bool) asm.Instructions { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go index 6c8de80dd86..104c74a890f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go @@ -1,6 +1,7 @@ package ebpf import ( + "errors" "fmt" "os" "runtime" @@ -10,7 +11,6 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/link" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -134,7 +134,7 @@ func haveBpfProgReplace() bool { // not supported return } - // attach_flags test succeded. + // attach_flags test succeeded. if !errors.Is(err, unix.EBADF) { logrus.Debugf("checking for BPF_F_REPLACE: got unexpected (not EBADF or EINVAL) error: %v", err) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go index 5f6ab9fd699..0cdaf747849 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go @@ -2,20 +2,27 @@ package cgroups import ( "bytes" + "errors" + "fmt" "os" + "path" + "strconv" "strings" "sync" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) // OpenFile opens a cgroup file in a given dir with given flags. -// It is supposed to be used for cgroup files only. +// It is supposed to be used for cgroup files only, and returns +// an error if the file is not a cgroup file. +// +// Arguments dir and file are joined together to form an absolute path +// to a file being opened. func OpenFile(dir, file string, flags int) (*os.File, error) { if dir == "" { - return nil, errors.Errorf("no directory specified for %s", file) + return nil, fmt.Errorf("no directory specified for %s", file) } return openFile(dir, file, flags) } @@ -43,7 +50,8 @@ func WriteFile(dir, file, data string) error { } defer fd.Close() if err := retryingWriteFile(fd, data); err != nil { - return errors.Wrapf(err, "failed to write %q", data) + // Having data in the error message helps in debugging. + return fmt.Errorf("failed to write %q: %w", data, err) } return nil } @@ -81,7 +89,7 @@ func prepareOpenat2() error { }) if err != nil { prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err} - if err != unix.ENOSYS { + if err != unix.ENOSYS { //nolint:errorlint // unix errors are bare logrus.Warnf("falling back to securejoin: %s", prepErr) } else { logrus.Debug("openat2 not available, falling back to securejoin") @@ -107,8 +115,6 @@ func prepareOpenat2() error { return prepErr } -// OpenFile opens a cgroup file in a given dir with given flags. -// It is supposed to be used for cgroup files only. 
func openFile(dir, file string, flags int) (*os.File, error) { mode := os.FileMode(0) if TestMode && flags&os.O_WRONLY != 0 { @@ -116,34 +122,52 @@ func openFile(dir, file string, flags int) (*os.File, error) { flags |= os.O_TRUNC | os.O_CREATE mode = 0o600 } + path := path.Join(dir, file) if prepareOpenat2() != nil { - return openFallback(dir, file, flags, mode) + return openFallback(path, flags, mode) } - reldir := strings.TrimPrefix(dir, cgroupfsPrefix) - if len(reldir) == len(dir) { // non-standard path, old system? - return openFallback(dir, file, flags, mode) + relPath := strings.TrimPrefix(path, cgroupfsPrefix) + if len(relPath) == len(path) { // non-standard path, old system? + return openFallback(path, flags, mode) } - relname := reldir + "/" + file - fd, err := unix.Openat2(cgroupFd, relname, + fd, err := unix.Openat2(cgroupFd, relPath, &unix.OpenHow{ Resolve: resolveFlags, Flags: uint64(flags) | unix.O_CLOEXEC, Mode: uint64(mode), }) if err != nil { - return nil, &os.PathError{Op: "openat2", Path: dir + "/" + file, Err: err} + err = &os.PathError{Op: "openat2", Path: path, Err: err} + // Check if cgroupFd is still opened to cgroupfsDir + // (happens when this package is incorrectly used + // across the chroot/pivot_root/mntns boundary, or + // when /sys/fs/cgroup is remounted). + // + // TODO: if such usage will ever be common, amend this + // to reopen cgroupFd and retry openat2. + fdStr := strconv.Itoa(cgroupFd) + fdDest, _ := os.Readlink("/proc/self/fd/" + fdStr) + if fdDest != cgroupfsDir { + // Wrap the error so it is clear that cgroupFd + // is opened to an unexpected/wrong directory. + err = fmt.Errorf("cgroupFd %s unexpectedly opened to %s != %s: %w", + fdStr, fdDest, cgroupfsDir, err) + } + return nil, err } - return os.NewFile(uintptr(fd), cgroupfsPrefix+relname), nil + return os.NewFile(uintptr(fd), path), nil } var errNotCgroupfs = errors.New("not a cgroup file") -// openFallback is used when openat2(2) is not available. It checks the opened +// Can be changed by unit tests. +var openFallback = openAndCheck + +// openAndCheck is used when openat2(2) is not available. It checks the opened // file is on cgroupfs, returning an error otherwise. 
-func openFallback(dir, file string, flags int, mode os.FileMode) (*os.File, error) { - path := dir + "/" + file +func openAndCheck(path string, flags int, mode os.FileMode) (*os.File, error) { fd, err := os.OpenFile(path, flags, mode) if err != nil { return nil, err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go index 88012a2f5f1..c81b6562ac5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go @@ -1,10 +1,7 @@ -// +build linux - package fs import ( "bufio" - "fmt" "os" "path/filepath" "strconv" @@ -23,8 +20,8 @@ func (s *BlkioGroup) Name() string { return "blkio" } -func (s *BlkioGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *BlkioGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *BlkioGroup) Set(path string, r *configs.Resources) error { @@ -131,19 +128,19 @@ func getBlkioStat(dir, file string) ([]cgroups.BlkioStatEntry, error) { // skip total line continue } else { - return nil, fmt.Errorf("Invalid line found while parsing %s/%s: %s", dir, file, sc.Text()) + return nil, malformedLine(dir, file, sc.Text()) } } v, err := strconv.ParseUint(fields[0], 10, 64) if err != nil { - return nil, err + return nil, &parseError{Path: dir, File: file, Err: err} } major := v v, err = strconv.ParseUint(fields[1], 10, 64) if err != nil { - return nil, err + return nil, &parseError{Path: dir, File: file, Err: err} } minor := v @@ -155,10 +152,13 @@ func getBlkioStat(dir, file string) ([]cgroups.BlkioStatEntry, error) { } v, err = strconv.ParseUint(fields[valueField], 10, 64) if err != nil { - return nil, err + return nil, &parseError{Path: dir, File: file, Err: err} } blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) } + if err := sc.Err(); err != nil { + return nil, &parseError{Path: dir, File: file, Err: err} + } return blkioStats, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go index 7ca8b8a8d40..6c79f899b48 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -21,24 +19,19 @@ func (s *CpuGroup) Name() string { return "cpu" } -func (s *CpuGroup) Apply(path string, d *cgroupData) error { - // This might happen if we have no cpu cgroup mounted. - // Just do nothing and don't fail. - if path == "" { - return nil - } +func (s *CpuGroup) Apply(path string, r *configs.Resources, pid int) error { if err := os.MkdirAll(path, 0o755); err != nil { return err } // We should set the real-Time group scheduling settings before moving // in the process because if the process is already in SCHED_RR mode // and no RT bandwidth is set, adding it will fail. - if err := s.SetRtSched(path, d.config.Resources); err != nil { + if err := s.SetRtSched(path, r); err != nil { return err } - // Since we are not using join(), we need to place the pid - // into the procs file unlike other subsystems. - return cgroups.WriteCgroupProc(path, d.pid) + // Since we are not using apply(), we need to place the pid + // into the procs file. 
+ return cgroups.WriteCgroupProc(path, pid) } func (s *CpuGroup) SetRtSched(path string, r *configs.Resources) error { @@ -105,7 +98,8 @@ func (s *CpuGroup) Set(path string, r *configs.Resources) error { } func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { - f, err := cgroups.OpenFile(path, "cpu.stat", os.O_RDONLY) + const file = "cpu.stat" + f, err := cgroups.OpenFile(path, file, os.O_RDONLY) if err != nil { if os.IsNotExist(err) { return nil @@ -118,7 +112,7 @@ func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { for sc.Scan() { t, v, err := fscommon.ParseKeyValue(sc.Text()) if err != nil { - return err + return &parseError{Path: path, File: file, Err: err} } switch t { case "nr_periods": diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go index 4fbf078494c..d3bd7e111c7 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go @@ -1,12 +1,8 @@ -// +build linux - package fs import ( "bufio" - "fmt" "os" - "path/filepath" "strconv" "strings" @@ -38,8 +34,8 @@ func (s *CpuacctGroup) Name() string { return "cpuacct" } -func (s *CpuacctGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *CpuacctGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *CpuacctGroup) Set(_ string, _ *configs.Resources) error { @@ -85,45 +81,43 @@ func getCpuUsageBreakdown(path string) (uint64, uint64, error) { const ( userField = "user" systemField = "system" + file = cgroupCpuacctStat ) // Expected format: // user // system - data, err := cgroups.ReadFile(path, cgroupCpuacctStat) + data, err := cgroups.ReadFile(path, file) if err != nil { return 0, 0, err } + // TODO: use strings.SplitN instead. fields := strings.Fields(data) - if len(fields) < 4 { - return 0, 0, fmt.Errorf("failure - %s is expected to have at least 4 fields", filepath.Join(path, cgroupCpuacctStat)) - } - if fields[0] != userField { - return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField) - } - if fields[2] != systemField { - return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField) + if len(fields) < 4 || fields[0] != userField || fields[2] != systemField { + return 0, 0, malformedLine(path, file, data) } if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { - return 0, 0, err + return 0, 0, &parseError{Path: path, File: file, Err: err} } if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { - return 0, 0, err + return 0, 0, &parseError{Path: path, File: file, Err: err} } return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil } func getPercpuUsage(path string) ([]uint64, error) { + const file = "cpuacct.usage_percpu" percpuUsage := []uint64{} - data, err := cgroups.ReadFile(path, "cpuacct.usage_percpu") + data, err := cgroups.ReadFile(path, file) if err != nil { return percpuUsage, err } + // TODO: use strings.SplitN instead. 
for _, value := range strings.Fields(data) { value, err := strconv.ParseUint(value, 10, 64) if err != nil { - return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) + return percpuUsage, &parseError{Path: path, File: file, Err: err} } percpuUsage = append(percpuUsage, value) } @@ -133,16 +127,17 @@ func getPercpuUsage(path string) ([]uint64, error) { func getPercpuUsageInModes(path string) ([]uint64, []uint64, error) { usageKernelMode := []uint64{} usageUserMode := []uint64{} + const file = cgroupCpuacctUsageAll - file, err := cgroups.OpenFile(path, cgroupCpuacctUsageAll, os.O_RDONLY) + fd, err := cgroups.OpenFile(path, file, os.O_RDONLY) if os.IsNotExist(err) { return usageKernelMode, usageUserMode, nil } else if err != nil { return nil, nil, err } - defer file.Close() + defer fd.Close() - scanner := bufio.NewScanner(file) + scanner := bufio.NewScanner(fd) scanner.Scan() // skipping header line for scanner.Scan() { @@ -153,19 +148,18 @@ func getPercpuUsageInModes(path string) ([]uint64, []uint64, error) { usageInKernelMode, err := strconv.ParseUint(lineFields[kernelModeColumn], 10, 64) if err != nil { - return nil, nil, fmt.Errorf("Unable to convert CPU usage in kernel mode to uint64: %s", err) + return nil, nil, &parseError{Path: path, File: file, Err: err} } usageKernelMode = append(usageKernelMode, usageInKernelMode) usageInUserMode, err := strconv.ParseUint(lineFields[userModeColumn], 10, 64) if err != nil { - return nil, nil, fmt.Errorf("Unable to convert CPU usage in user mode to uint64: %s", err) + return nil, nil, &parseError{Path: path, File: file, Err: err} } usageUserMode = append(usageUserMode, usageInUserMode) } - if err := scanner.Err(); err != nil { - return nil, nil, fmt.Errorf("Problem in reading %s line by line, %s", cgroupCpuacctUsageAll, err) + return nil, nil, &parseError{Path: path, File: file, Err: err} } return usageKernelMode, usageUserMode, nil diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go index 58a0f040644..550baa42756 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go @@ -1,19 +1,17 @@ -// +build linux - package fs import ( - "fmt" + "errors" "os" "path/filepath" "strconv" "strings" + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) type CpusetGroup struct{} @@ -22,8 +20,8 @@ func (s *CpusetGroup) Name() string { return "cpuset" } -func (s *CpusetGroup) Apply(path string, d *cgroupData) error { - return s.ApplyDir(path, d.config.Resources, d.pid) +func (s *CpusetGroup) Apply(path string, r *configs.Resources, pid int) error { + return s.ApplyDir(path, r, pid) } func (s *CpusetGroup) Set(path string, r *configs.Resources) error { @@ -40,32 +38,32 @@ func (s *CpusetGroup) Set(path string, r *configs.Resources) error { return nil } -func getCpusetStat(path string, filename string) ([]uint16, error) { +func getCpusetStat(path string, file string) ([]uint16, error) { var extracted []uint16 - fileContent, err := fscommon.GetCgroupParamString(path, filename) + fileContent, err := fscommon.GetCgroupParamString(path, file) if err != nil { return extracted, err } if len(fileContent) == 0 { - return extracted, fmt.Errorf("%s 
found to be empty", filepath.Join(path, filename)) + return extracted, &parseError{Path: path, File: file, Err: errors.New("empty file")} } for _, s := range strings.Split(fileContent, ",") { - splitted := strings.SplitN(s, "-", 3) - switch len(splitted) { + sp := strings.SplitN(s, "-", 3) + switch len(sp) { case 3: - return extracted, fmt.Errorf("invalid values in %s", filepath.Join(path, filename)) + return extracted, &parseError{Path: path, File: file, Err: errors.New("extra dash")} case 2: - min, err := strconv.ParseUint(splitted[0], 10, 16) + min, err := strconv.ParseUint(sp[0], 10, 16) if err != nil { - return extracted, err + return extracted, &parseError{Path: path, File: file, Err: err} } - max, err := strconv.ParseUint(splitted[1], 10, 16) + max, err := strconv.ParseUint(sp[1], 10, 16) if err != nil { - return extracted, err + return extracted, &parseError{Path: path, File: file, Err: err} } if min > max { - return extracted, fmt.Errorf("invalid values in %s", filepath.Join(path, filename)) + return extracted, &parseError{Path: path, File: file, Err: errors.New("invalid values, min > max")} } for i := min; i <= max; i++ { extracted = append(extracted, uint16(i)) @@ -73,7 +71,7 @@ func getCpusetStat(path string, filename string) ([]uint16, error) { case 1: value, err := strconv.ParseUint(s, 10, 16) if err != nil { - return extracted, err + return extracted, &parseError{Path: path, File: file, Err: err} } extracted = append(extracted, uint16(value)) } @@ -168,9 +166,8 @@ func (s *CpusetGroup) ApplyDir(dir string, r *configs.Resources, pid int) error if err := s.ensureCpusAndMems(dir, r); err != nil { return err } - - // because we are not using d.join we need to place the pid into the procs file - // unlike the other subsystems + // Since we are not using apply(), we need to place the pid + // into the procs file. return cgroups.WriteCgroupProc(dir, pid) } @@ -198,7 +195,7 @@ func cpusetEnsureParent(current string) error { } // Treat non-existing directory as cgroupfs as it will be created, // and the root cpuset directory obviously exists. 
- if err != nil && err != unix.ENOENT { + if err != nil && err != unix.ENOENT { //nolint:errorlint // unix errors are bare return &os.PathError{Op: "statfs", Path: parent, Err: err} } @@ -224,12 +221,12 @@ func cpusetCopyIfNeeded(current, parent string) error { } if isEmptyCpuset(currentCpus) { - if err := cgroups.WriteFile(current, "cpuset.cpus", string(parentCpus)); err != nil { + if err := cgroups.WriteFile(current, "cpuset.cpus", parentCpus); err != nil { return err } } if isEmptyCpuset(currentMems) { - if err := cgroups.WriteFile(current, "cpuset.mems", string(parentMems)); err != nil { + if err := cgroups.WriteFile(current, "cpuset.mems", parentMems); err != nil { return err } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go index dcf69ce13e7..4527a70ebfc 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -15,15 +13,15 @@ import ( ) type DevicesGroup struct { - testingSkipFinalCheck bool + TestingSkipFinalCheck bool } func (s *DevicesGroup) Name() string { return "devices" } -func (s *DevicesGroup) Apply(path string, d *cgroupData) error { - if d.config.SkipDevices { +func (s *DevicesGroup) Apply(path string, r *configs.Resources, pid int) error { + if r.SkipDevices { return nil } if path == "" { @@ -31,7 +29,8 @@ func (s *DevicesGroup) Apply(path string, d *cgroupData) error { // is a hard requirement for container's security. return errSubsystemDoesNotExist } - return join(path, d.pid) + + return apply(path, pid) } func loadEmulator(path string) (*cgroupdevices.Emulator, error) { @@ -91,7 +90,7 @@ func (s *DevicesGroup) Set(path string, r *configs.Resources) error { // // This safety-check is skipped for the unit tests because we cannot // currently mock devices.list correctly. - if !s.testingSkipFinalCheck { + if !s.TestingSkipFinalCheck { currentAfter, err := loadEmulator(path) if err != nil { return err diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go new file mode 100644 index 00000000000..f2ab6f130e3 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/error.go @@ -0,0 +1,15 @@ +package fs + +import ( + "fmt" + + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" +) + +type parseError = fscommon.ParseError + +// malformedLine is used by all cgroupfs file parsers that expect a line +// in a particular format but get some garbage instead. 
+func malformedLine(path, file, line string) error { + return &parseError{Path: path, File: file, Err: fmt.Errorf("malformed line: %s", line)} +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go index 4baa2798ac9..987f1bf5e74 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -21,8 +19,8 @@ func (s *FreezerGroup) Name() string { return "freezer" } -func (s *FreezerGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *FreezerGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *FreezerGroup) Set(path string, r *configs.Resources) (Err error) { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go index 777b94f0356..fb4fcc7f75b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go @@ -1,165 +1,86 @@ -// +build linux - package fs import ( + "errors" "fmt" "os" - "path/filepath" "sync" + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" - libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) -var ( - subsystems = []subsystem{ - &CpusetGroup{}, - &DevicesGroup{}, - &MemoryGroup{}, - &CpuGroup{}, - &CpuacctGroup{}, - &PidsGroup{}, - &BlkioGroup{}, - &HugetlbGroup{}, - &NetClsGroup{}, - &NetPrioGroup{}, - &PerfEventGroup{}, - &FreezerGroup{}, - &NameGroup{GroupName: "name=systemd", Join: true}, - } - HugePageSizes, _ = cgroups.GetHugePageSize() -) +var subsystems = []subsystem{ + &CpusetGroup{}, + &DevicesGroup{}, + &MemoryGroup{}, + &CpuGroup{}, + &CpuacctGroup{}, + &PidsGroup{}, + &BlkioGroup{}, + &HugetlbGroup{}, + &NetClsGroup{}, + &NetPrioGroup{}, + &PerfEventGroup{}, + &FreezerGroup{}, + &RdmaGroup{}, + &NameGroup{GroupName: "name=systemd", Join: true}, +} var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist") +func init() { + // If using cgroups-hybrid mode then add a "" controller indicating + // it should join the cgroups v2. + if cgroups.IsCgroup2HybridMode() { + subsystems = append(subsystems, &NameGroup{GroupName: "", Join: true}) + } +} + type subsystem interface { // Name returns the name of the subsystem. Name() string - // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. + // GetStats fills in the stats for the subsystem. GetStats(path string, stats *cgroups.Stats) error - // Creates and joins the cgroup represented by 'cgroupData'. - Apply(path string, c *cgroupData) error + // Apply creates and joins a cgroup, adding pid into it. Some + // subsystems use resources to pre-configure the cgroup parents + // before creating or joining it. + Apply(path string, r *configs.Resources, pid int) error // Set sets the cgroup resources. 
Set(path string, r *configs.Resources) error } type manager struct { - mu sync.Mutex - cgroups *configs.Cgroup - rootless bool // ignore permission-related errors - paths map[string]string -} - -func NewManager(cg *configs.Cgroup, paths map[string]string, rootless bool) cgroups.Manager { - return &manager{ - cgroups: cg, - paths: paths, - rootless: rootless, - } -} - -// The absolute path to the root of the cgroup hierarchies. -var ( - cgroupRootLock sync.Mutex - cgroupRoot string -) - -const defaultCgroupRoot = "/sys/fs/cgroup" - -func tryDefaultCgroupRoot() string { - var st, pst unix.Stat_t - - // (1) it should be a directory... - err := unix.Lstat(defaultCgroupRoot, &st) - if err != nil || st.Mode&unix.S_IFDIR == 0 { - return "" - } - - // (2) ... and a mount point ... - err = unix.Lstat(filepath.Dir(defaultCgroupRoot), &pst) - if err != nil { - return "" - } - - if st.Dev == pst.Dev { - // parent dir has the same dev -- not a mount point - return "" - } - - // (3) ... of 'tmpfs' fs type. - var fst unix.Statfs_t - err = unix.Statfs(defaultCgroupRoot, &fst) - if err != nil || fst.Type != unix.TMPFS_MAGIC { - return "" - } - - // (4) it should have at least 1 entry ... - dir, err := os.Open(defaultCgroupRoot) - if err != nil { - return "" - } - names, err := dir.Readdirnames(1) - if err != nil { - return "" - } - if len(names) < 1 { - return "" - } - // ... which is a cgroup mount point. - err = unix.Statfs(filepath.Join(defaultCgroupRoot, names[0]), &fst) - if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC { - return "" - } - - return defaultCgroupRoot + mu sync.Mutex + cgroups *configs.Cgroup + paths map[string]string } -// Gets the cgroupRoot. -func getCgroupRoot() (string, error) { - cgroupRootLock.Lock() - defer cgroupRootLock.Unlock() - - if cgroupRoot != "" { - return cgroupRoot, nil - } - - // fast path - cgroupRoot = tryDefaultCgroupRoot() - if cgroupRoot != "" { - return cgroupRoot, nil +func NewManager(cg *configs.Cgroup, paths map[string]string) (cgroups.Manager, error) { + // Some v1 controllers (cpu, cpuset, and devices) expect + // cgroups.Resources to not be nil in Apply. + if cg.Resources == nil { + return nil, errors.New("cgroup v1 manager needs configs.Resources to be set during manager creation") } - - // slow path: parse mountinfo - mi, err := cgroups.GetCgroupMounts(false) - if err != nil { - return "", err - } - if len(mi) < 1 { - return "", errors.New("no cgroup mount found in mountinfo") + if cg.Resources.Unified != nil { + return nil, cgroups.ErrV1NoUnified } - // Get the first cgroup mount (e.g. "/sys/fs/cgroup/memory"), - // use its parent directory. - root := filepath.Dir(mi[0].Mountpoint) - - if _, err := os.Stat(root); err != nil { - return "", err + if paths == nil { + var err error + paths, err = initPaths(cg) + if err != nil { + return nil, err + } } - cgroupRoot = root - return cgroupRoot, nil -} - -type cgroupData struct { - root string - innerPath string - config *configs.Cgroup - pid int + return &manager{ + cgroups: cg, + paths: paths, + }, nil } // isIgnorableError returns whether err is a permission error (in the loose @@ -171,8 +92,6 @@ func isIgnorableError(rootless bool, err error) bool { if !rootless { return false } - // TODO: rm errors.Cause once we switch to %w everywhere - err = errors.Cause(err) // Is it an ordinary EPERM? 
if errors.Is(err, os.ErrPermission) { return true @@ -186,56 +105,30 @@ func isIgnorableError(rootless bool, err error) bool { } func (m *manager) Apply(pid int) (err error) { - if m.cgroups == nil { - return nil - } m.mu.Lock() defer m.mu.Unlock() c := m.cgroups - if c.Resources.Unified != nil { - return cgroups.ErrV1NoUnified - } - - m.paths = make(map[string]string) - if c.Paths != nil { - cgMap, err := cgroups.ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return err - } - for name, path := range c.Paths { - // XXX(kolyshkin@): why this check is needed? - if _, ok := cgMap[name]; ok { - m.paths[name] = path - } - } - return cgroups.EnterPid(m.paths, pid) - } - - d, err := getCgroupData(m.cgroups, pid) - if err != nil { - return err - } for _, sys := range subsystems { - p, err := d.path(sys.Name()) - if err != nil { - // The non-presence of the devices subsystem is - // considered fatal for security reasons. - if cgroups.IsNotFound(err) && (c.SkipDevices || sys.Name() != "devices") { - continue - } - return err + name := sys.Name() + p, ok := m.paths[name] + if !ok { + continue } - m.paths[sys.Name()] = p - if err := sys.Apply(p, d); err != nil { + if err := sys.Apply(p, c.Resources, pid); err != nil { // In the case of rootless (including euid=0 in userns), where an // explicit cgroup path hasn't been set, we don't bail on error in - // case of permission problems. Cases where limits have been set - // (and we couldn't create our own cgroup) are handled by Set. - if isIgnorableError(m.rootless, err) && m.cgroups.Path == "" { - delete(m.paths, sys.Name()) + // case of permission problems here, but do delete the path from + // the m.paths map, since it is either non-existent and could not + // be created, or the pid could not be added to it. + // + // Cases where limits for the subsystem have been set are handled + // later by Set, which fails with a friendly error (see + // if path == "" in Set). + if isIgnorableError(c.Rootless, err) && c.Path == "" { + delete(m.paths, name) continue } return err @@ -246,9 +139,6 @@ func (m *manager) Apply(pid int) (err error) { } func (m *manager) Destroy() error { - if m.cgroups == nil || m.cgroups.Paths != nil { - return nil - } m.mu.Lock() defer m.mu.Unlock() return cgroups.RemovePaths(m.paths) @@ -281,11 +171,6 @@ func (m *manager) Set(r *configs.Resources) error { return nil } - // If Paths are set, then we are just joining cgroups paths - // and there is no need to set any values. - if m.cgroups != nil && m.cgroups.Paths != nil { - return nil - } if r.Unified != nil { return cgroups.ErrV1NoUnified } @@ -295,10 +180,11 @@ func (m *manager) Set(r *configs.Resources) error { for _, sys := range subsystems { path := m.paths[sys.Name()] if err := sys.Set(path, r); err != nil { - if m.rootless && sys.Name() == "devices" { + // When rootless is true, errors from the device subsystem + // are ignored, as it is really not expected to work. + if m.cgroups.Rootless && sys.Name() == "devices" { continue } - // When m.rootless is true, errors from the device subsystem are ignored because it is really not expected to work. // However, errors from other subsystems are not ignored. 
// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error" if path == "" { @@ -317,7 +203,7 @@ func (m *manager) Set(r *configs.Resources) error { // provided func (m *manager) Freeze(state configs.FreezerState) error { path := m.Path("freezer") - if m.cgroups == nil || path == "" { + if path == "" { return errors.New("cannot toggle freezer: cgroups not configured for container") } @@ -339,68 +225,6 @@ func (m *manager) GetAllPids() ([]int, error) { return cgroups.GetAllPids(m.Path("devices")) } -func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) { - root, err := getCgroupRoot() - if err != nil { - return nil, err - } - - if (c.Name != "" || c.Parent != "") && c.Path != "" { - return nil, errors.New("cgroup: either Path or Name and Parent should be used") - } - - // XXX: Do not remove this code. Path safety is important! -- cyphar - cgPath := libcontainerUtils.CleanPath(c.Path) - cgParent := libcontainerUtils.CleanPath(c.Parent) - cgName := libcontainerUtils.CleanPath(c.Name) - - innerPath := cgPath - if innerPath == "" { - innerPath = filepath.Join(cgParent, cgName) - } - - return &cgroupData{ - root: root, - innerPath: innerPath, - config: c, - pid: pid, - }, nil -} - -func (raw *cgroupData) path(subsystem string) (string, error) { - // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. - if filepath.IsAbs(raw.innerPath) { - mnt, err := cgroups.FindCgroupMountpoint(raw.root, subsystem) - // If we didn't mount the subsystem, there is no point we make the path. - if err != nil { - return "", err - } - - // Sometimes subsystems can be mounted together as 'cpu,cpuacct'. - return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil - } - - // Use GetOwnCgroupPath instead of GetInitCgroupPath, because the creating - // process could in container and shared pid namespace with host, and - // /proc/1/cgroup could point to whole other world of cgroups. - parentPath, err := cgroups.GetOwnCgroupPath(subsystem) - if err != nil { - return "", err - } - - return filepath.Join(parentPath, raw.innerPath), nil -} - -func join(path string, pid int) error { - if path == "" { - return nil - } - if err := os.MkdirAll(path, 0o755); err != nil { - return err - } - return cgroups.WriteCgroupProc(path, pid) -} - func (m *manager) GetPaths() map[string]string { m.mu.Lock() defer m.mu.Unlock() @@ -432,7 +256,7 @@ func OOMKillCount(path string) (uint64, error) { func (m *manager) OOMKillCount() (uint64, error) { c, err := OOMKillCount(m.Path("memory")) // Ignore ENOENT when rootless as it couldn't create cgroup. 
- if err != nil && m.rootless && os.IsNotExist(err) { + if err != nil && m.cgroups.Rootless && os.IsNotExist(err) { err = nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go index 3cafc5399ee..8ddd6fdd837 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go @@ -1,9 +1,6 @@ -// +build linux - package fs import ( - "fmt" "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" @@ -17,8 +14,8 @@ func (s *HugetlbGroup) Name() string { return "hugetlb" } -func (s *HugetlbGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *HugetlbGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *HugetlbGroup) Set(path string, r *configs.Resources) error { @@ -32,29 +29,29 @@ func (s *HugetlbGroup) Set(path string, r *configs.Resources) error { } func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error { - hugetlbStats := cgroups.HugetlbStats{} if !cgroups.PathExists(path) { return nil } - for _, pageSize := range HugePageSizes { + hugetlbStats := cgroups.HugetlbStats{} + for _, pageSize := range cgroups.HugePageSizes() { usage := "hugetlb." + pageSize + ".usage_in_bytes" value, err := fscommon.GetCgroupParamUint(path, usage) if err != nil { - return fmt.Errorf("failed to parse %s - %v", usage, err) + return err } hugetlbStats.Usage = value maxUsage := "hugetlb." + pageSize + ".max_usage_in_bytes" value, err = fscommon.GetCgroupParamUint(path, maxUsage) if err != nil { - return fmt.Errorf("failed to parse %s - %v", maxUsage, err) + return err } hugetlbStats.MaxUsage = value failcnt := "hugetlb." 
+ pageSize + ".failcnt" value, err = fscommon.GetCgroupParamUint(path, failcnt) if err != nil { - return fmt.Errorf("failed to parse %s - %v", failcnt, err) + return err } hugetlbStats.Failcnt = value diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go index 33946726f15..b7c75f94138 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go @@ -1,9 +1,8 @@ -// +build linux - package fs import ( "bufio" + "errors" "fmt" "math" "os" @@ -11,11 +10,11 @@ import ( "strconv" "strings" + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) const ( @@ -31,8 +30,8 @@ func (s *MemoryGroup) Name() string { return "memory" } -func (s *MemoryGroup) Apply(path string, d *cgroupData) (err error) { - return join(path, d.pid) +func (s *MemoryGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func setMemory(path string, val int64) error { @@ -56,7 +55,7 @@ func setMemory(path string, val int64) error { return err } - return errors.Errorf("unable to set memory limit to %d (current usage: %d, peak usage: %d)", val, usage, max) + return fmt.Errorf("unable to set memory limit to %d (current usage: %d, peak usage: %d)", val, usage, max) } func setSwap(path string, val int64) error { @@ -134,15 +133,15 @@ func (s *MemoryGroup) Set(path string, r *configs.Resources) error { return err } } else { - return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", *r.MemorySwappiness) + return fmt.Errorf("invalid memory swappiness value: %d (valid range is 0-100)", *r.MemorySwappiness) } return nil } func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { - // Set stats from memory.stat. - statsFile, err := cgroups.OpenFile(path, "memory.stat", os.O_RDONLY) + const file = "memory.stat" + statsFile, err := cgroups.OpenFile(path, file, os.O_RDONLY) if err != nil { if os.IsNotExist(err) { return nil @@ -155,7 +154,7 @@ func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { for sc.Scan() { t, v, err := fscommon.ParseKeyValue(sc.Text()) if err != nil { - return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) + return &parseError{Path: path, File: file, Err: err} } stats.MemoryStats.Stats[t] = v } @@ -220,42 +219,42 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) { // are optional in the kernel. 
return cgroups.MemoryData{}, nil } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err) + return cgroups.MemoryData{}, err } memoryData.Usage = value value, err = fscommon.GetCgroupParamUint(path, maxUsage) if err != nil { - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err) + return cgroups.MemoryData{}, err } memoryData.MaxUsage = value value, err = fscommon.GetCgroupParamUint(path, failcnt) if err != nil { - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err) + return cgroups.MemoryData{}, err } memoryData.Failcnt = value value, err = fscommon.GetCgroupParamUint(path, limit) if err != nil { - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err) + return cgroups.MemoryData{}, err } memoryData.Limit = value return memoryData, nil } -func getPageUsageByNUMA(cgroupPath string) (cgroups.PageUsageByNUMA, error) { +func getPageUsageByNUMA(path string) (cgroups.PageUsageByNUMA, error) { const ( maxColumns = math.MaxUint8 + 1 - filename = "memory.numa_stat" + file = "memory.numa_stat" ) stats := cgroups.PageUsageByNUMA{} - file, err := cgroups.OpenFile(cgroupPath, filename, os.O_RDONLY) + fd, err := cgroups.OpenFile(path, file, os.O_RDONLY) if os.IsNotExist(err) { return stats, nil } else if err != nil { return stats, err } - defer file.Close() + defer fd.Close() // File format is documented in linux/Documentation/cgroup-v1/memory.txt // and it looks like this: @@ -266,7 +265,7 @@ func getPageUsageByNUMA(cgroupPath string) (cgroups.PageUsageByNUMA, error) { // unevictable= N0= N1= ... // hierarchical_= N0= N1= ... - scanner := bufio.NewScanner(file) + scanner := bufio.NewScanner(fd) for scanner.Scan() { var field *cgroups.PageStats @@ -284,8 +283,7 @@ func getPageUsageByNUMA(cgroupPath string) (cgroups.PageUsageByNUMA, error) { } else { // The first column was already validated, // so be strict to the rest. - return stats, fmt.Errorf("malformed line %q in %s", - line, filename) + return stats, malformedLine(path, file, line) } } key, val := byNode[0], byNode[1] @@ -296,24 +294,23 @@ func getPageUsageByNUMA(cgroupPath string) (cgroups.PageUsageByNUMA, error) { } field.Total, err = strconv.ParseUint(val, 0, 64) if err != nil { - return stats, err + return stats, &parseError{Path: path, File: file, Err: err} } field.Nodes = map[uint8]uint64{} } else { // Subsequent columns: key is N, val is usage. if len(key) < 2 || key[0] != 'N' { // This is definitely an error. 
- return stats, fmt.Errorf("malformed line %q in %s", - line, filename) + return stats, malformedLine(path, file, line) } n, err := strconv.ParseUint(key[1:], 10, 8) if err != nil { - return cgroups.PageUsageByNUMA{}, err + return stats, &parseError{Path: path, File: file, Err: err} } usage, err := strconv.ParseUint(val, 10, 64) if err != nil { - return cgroups.PageUsageByNUMA{}, err + return stats, &parseError{Path: path, File: file, Err: err} } field.Nodes[uint8(n)] = usage @@ -321,9 +318,8 @@ func getPageUsageByNUMA(cgroupPath string) (cgroups.PageUsageByNUMA, error) { } } - err = scanner.Err() - if err != nil { - return cgroups.PageUsageByNUMA{}, err + if err := scanner.Err(); err != nil { + return cgroups.PageUsageByNUMA{}, &parseError{Path: path, File: file, Err: err} } return stats, nil diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go index 94a94b5e8d0..b8d5d849c52 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -16,10 +14,10 @@ func (s *NameGroup) Name() string { return s.GroupName } -func (s *NameGroup) Apply(path string, d *cgroupData) error { +func (s *NameGroup) Apply(path string, _ *configs.Resources, pid int) error { if s.Join { - // ignore errors if the named cgroup does not exist - _ = join(path, d.pid) + // Ignore errors if the named cgroup does not exist. + _ = apply(path, pid) } return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go index f2617aa4442..abfd09ce83a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -15,8 +13,8 @@ func (s *NetClsGroup) Name() string { return "net_cls" } -func (s *NetClsGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *NetClsGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *NetClsGroup) Set(path string, r *configs.Resources) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go index d0ac5e66bbb..da74d377989 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -13,8 +11,8 @@ func (s *NetPrioGroup) Name() string { return "net_prio" } -func (s *NetPrioGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *NetPrioGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *NetPrioGroup) Set(path string, r *configs.Resources) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go new file mode 100644 index 00000000000..1092331b25d --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/paths.go @@ -0,0 +1,186 @@ +package fs + +import ( + "errors" + "os" + "path/filepath" + "sync" + + "golang.org/x/sys/unix" + + 
"github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/utils" +) + +// The absolute path to the root of the cgroup hierarchies. +var ( + cgroupRootLock sync.Mutex + cgroupRoot string +) + +const defaultCgroupRoot = "/sys/fs/cgroup" + +func initPaths(cg *configs.Cgroup) (map[string]string, error) { + root, err := rootPath() + if err != nil { + return nil, err + } + + inner, err := innerPath(cg) + if err != nil { + return nil, err + } + + paths := make(map[string]string) + for _, sys := range subsystems { + name := sys.Name() + path, err := subsysPath(root, inner, name) + if err != nil { + // The non-presence of the devices subsystem + // is considered fatal for security reasons. + if cgroups.IsNotFound(err) && (cg.SkipDevices || name != "devices") { + continue + } + + return nil, err + } + paths[name] = path + } + + return paths, nil +} + +func tryDefaultCgroupRoot() string { + var st, pst unix.Stat_t + + // (1) it should be a directory... + err := unix.Lstat(defaultCgroupRoot, &st) + if err != nil || st.Mode&unix.S_IFDIR == 0 { + return "" + } + + // (2) ... and a mount point ... + err = unix.Lstat(filepath.Dir(defaultCgroupRoot), &pst) + if err != nil { + return "" + } + + if st.Dev == pst.Dev { + // parent dir has the same dev -- not a mount point + return "" + } + + // (3) ... of 'tmpfs' fs type. + var fst unix.Statfs_t + err = unix.Statfs(defaultCgroupRoot, &fst) + if err != nil || fst.Type != unix.TMPFS_MAGIC { + return "" + } + + // (4) it should have at least 1 entry ... + dir, err := os.Open(defaultCgroupRoot) + if err != nil { + return "" + } + names, err := dir.Readdirnames(1) + if err != nil { + return "" + } + if len(names) < 1 { + return "" + } + // ... which is a cgroup mount point. + err = unix.Statfs(filepath.Join(defaultCgroupRoot, names[0]), &fst) + if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC { + return "" + } + + return defaultCgroupRoot +} + +// rootPath finds and returns path to the root of the cgroup hierarchies. +func rootPath() (string, error) { + cgroupRootLock.Lock() + defer cgroupRootLock.Unlock() + + if cgroupRoot != "" { + return cgroupRoot, nil + } + + // fast path + cgroupRoot = tryDefaultCgroupRoot() + if cgroupRoot != "" { + return cgroupRoot, nil + } + + // slow path: parse mountinfo + mi, err := cgroups.GetCgroupMounts(false) + if err != nil { + return "", err + } + if len(mi) < 1 { + return "", errors.New("no cgroup mount found in mountinfo") + } + + // Get the first cgroup mount (e.g. "/sys/fs/cgroup/memory"), + // use its parent directory. + root := filepath.Dir(mi[0].Mountpoint) + + if _, err := os.Stat(root); err != nil { + return "", err + } + + cgroupRoot = root + return cgroupRoot, nil +} + +func innerPath(c *configs.Cgroup) (string, error) { + if (c.Name != "" || c.Parent != "") && c.Path != "" { + return "", errors.New("cgroup: either Path or Name and Parent should be used") + } + + // XXX: Do not remove CleanPath. Path safety is important! -- cyphar + innerPath := utils.CleanPath(c.Path) + if innerPath == "" { + cgParent := utils.CleanPath(c.Parent) + cgName := utils.CleanPath(c.Name) + innerPath = filepath.Join(cgParent, cgName) + } + + return innerPath, nil +} + +func subsysPath(root, inner, subsystem string) (string, error) { + // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. 
+ if filepath.IsAbs(inner) { + mnt, err := cgroups.FindCgroupMountpoint(root, subsystem) + // If we didn't mount the subsystem, there is no point we make the path. + if err != nil { + return "", err + } + + // Sometimes subsystems can be mounted together as 'cpu,cpuacct'. + return filepath.Join(root, filepath.Base(mnt), inner), nil + } + + // Use GetOwnCgroupPath instead of GetInitCgroupPath, because the creating + // process could in container and shared pid namespace with host, and + // /proc/1/cgroup could point to whole other world of cgroups. + parentPath, err := cgroups.GetOwnCgroupPath(subsystem) + if err != nil { + return "", err + } + + return filepath.Join(parentPath, inner), nil +} + +func apply(path string, pid int) error { + if path == "" { + return nil + } + if err := os.MkdirAll(path, 0o755); err != nil { + return err + } + return cgroups.WriteCgroupProc(path, pid) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go index 1a306fbe3f7..b86955c8f31 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go @@ -1,5 +1,3 @@ -// +build linux - package fs import ( @@ -13,8 +11,8 @@ func (s *PerfEventGroup) Name() string { return "perf_event" } -func (s *PerfEventGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *PerfEventGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *PerfEventGroup) Set(_ string, _ *configs.Resources) error { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go index 1b08433c450..1f13532a5ad 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go @@ -1,10 +1,7 @@ -// +build linux - package fs import ( - "fmt" - "path/filepath" + "math" "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" @@ -18,8 +15,8 @@ func (s *PidsGroup) Name() string { return "pids" } -func (s *PidsGroup) Apply(path string, d *cgroupData) error { - return join(path, d.pid) +func (s *PidsGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) } func (s *PidsGroup) Set(path string, r *configs.Resources) error { @@ -45,21 +42,18 @@ func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error { } current, err := fscommon.GetCgroupParamUint(path, "pids.current") if err != nil { - return fmt.Errorf("failed to parse pids.current - %s", err) + return err } - maxString, err := fscommon.GetCgroupParamString(path, "pids.max") + max, err := fscommon.GetCgroupParamUint(path, "pids.max") if err != nil { - return fmt.Errorf("failed to parse pids.max - %s", err) + return err } - - // Default if pids.max == "max" is 0 -- which represents "no limit". - var max uint64 - if maxString != "max" { - max, err = fscommon.ParseUint(maxString, 10, 64) - if err != nil { - return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max")) - } + // If no limit is set, read from pids.max returns "max", which is + // converted to MaxUint64 by GetCgroupParamUint. Historically, we + // represent "no limit" for pids as 0, thus this conversion. 
+ if max == math.MaxUint64 { + max = 0 } stats.PidsStats.Current = current diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go new file mode 100644 index 00000000000..5bbe0f35f81 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/rdma.go @@ -0,0 +1,25 @@ +package fs + +import ( + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" + "github.com/opencontainers/runc/libcontainer/configs" +) + +type RdmaGroup struct{} + +func (s *RdmaGroup) Name() string { + return "rdma" +} + +func (s *RdmaGroup) Apply(path string, _ *configs.Resources, pid int) error { + return apply(path, pid) +} + +func (s *RdmaGroup) Set(path string, r *configs.Resources) error { + return fscommon.RdmaSet(path, r) +} + +func (s *RdmaGroup) GetStats(path string, stats *cgroups.Stats) error { + return fscommon.RdmaGetStats(path, stats) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/unsupported.go deleted file mode 100644 index 3ef9e031586..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package fs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go index 25c47c9617e..bbbae4d58c4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpu.go @@ -1,5 +1,3 @@ -// +build linux - package fs2 import ( @@ -49,7 +47,8 @@ func setCpu(dirPath string, r *configs.Resources) error { } func statCpu(dirPath string, stats *cgroups.Stats) error { - f, err := cgroups.OpenFile(dirPath, "cpu.stat", os.O_RDONLY) + const file = "cpu.stat" + f, err := cgroups.OpenFile(dirPath, file, os.O_RDONLY) if err != nil { return err } @@ -59,7 +58,7 @@ func statCpu(dirPath string, stats *cgroups.Stats) error { for sc.Scan() { t, v, err := fscommon.ParseKeyValue(sc.Text()) if err != nil { - return err + return &parseError{Path: dirPath, File: file, Err: err} } switch t { case "usage_usec": @@ -81,5 +80,8 @@ func statCpu(dirPath string, stats *cgroups.Stats) error { stats.CpuStats.ThrottlingData.ThrottledTime = v * 1000 } } + if err := sc.Err(); err != nil { + return &parseError{Path: dirPath, File: file, Err: err} + } return nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go index da29d7f2bbb..16c45bad8f0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/cpuset.go @@ -1,5 +1,3 @@ -// +build linux - package fs2 import ( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go index ba81ce0b41b..9c949c91f08 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/defaultpath.go @@ -18,41 +18,37 @@ package fs2 import ( "bufio" + "errors" + "fmt" "io" "os" "path/filepath" "strings" "github.com/opencontainers/runc/libcontainer/configs" 
- libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" - "github.com/pkg/errors" + "github.com/opencontainers/runc/libcontainer/utils" ) const UnifiedMountpoint = "/sys/fs/cgroup" func defaultDirPath(c *configs.Cgroup) (string, error) { if (c.Name != "" || c.Parent != "") && c.Path != "" { - return "", errors.Errorf("cgroup: either Path or Name and Parent should be used, got %+v", c) + return "", fmt.Errorf("cgroup: either Path or Name and Parent should be used, got %+v", c) } - if len(c.Paths) != 0 { - // never set by specconv - return "", errors.Errorf("cgroup: Paths is unsupported, use Path, got %+v", c) - } - - // XXX: Do not remove this code. Path safety is important! -- cyphar - cgPath := libcontainerUtils.CleanPath(c.Path) - cgParent := libcontainerUtils.CleanPath(c.Parent) - cgName := libcontainerUtils.CleanPath(c.Name) - return _defaultDirPath(UnifiedMountpoint, cgPath, cgParent, cgName) + return _defaultDirPath(UnifiedMountpoint, c.Path, c.Parent, c.Name) } func _defaultDirPath(root, cgPath, cgParent, cgName string) (string, error) { if (cgName != "" || cgParent != "") && cgPath != "" { return "", errors.New("cgroup: either Path or Name and Parent should be used") } - innerPath := cgPath + + // XXX: Do not remove CleanPath. Path safety is important! -- cyphar + innerPath := utils.CleanPath(cgPath) if innerPath == "" { + cgParent := utils.CleanPath(cgParent) + cgName := utils.CleanPath(cgName) innerPath = filepath.Join(cgParent, cgName) } if filepath.IsAbs(innerPath) { @@ -89,7 +85,7 @@ func parseCgroupFromReader(r io.Reader) (string, error) { parts = strings.SplitN(text, ":", 3) ) if len(parts) < 3 { - return "", errors.Errorf("invalid cgroup entry: %q", text) + return "", fmt.Errorf("invalid cgroup entry: %q", text) } // text is like "0::/user.slice/user-1001.slice/session-1.scope" if parts[0] == "0" && parts[1] == "" { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go index 7c501cad8a6..0d23456072c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/devices.go @@ -1,16 +1,15 @@ -// +build linux - package fs2 import ( + "fmt" + + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups/ebpf" "github.com/opencontainers/runc/libcontainer/cgroups/ebpf/devicefilter" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runc/libcontainer/userns" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) func isRWM(perms devices.Permissions) bool { @@ -64,7 +63,7 @@ func setDevices(dirPath string, r *configs.Resources) error { } dirFD, err := unix.Open(dirPath, unix.O_DIRECTORY|unix.O_RDONLY, 0o600) if err != nil { - return errors.Errorf("cannot get dir FD for %s", dirPath) + return fmt.Errorf("cannot get dir FD for %s", dirPath) } defer unix.Close(dirFD) if _, err := ebpf.LoadAttachCgroupDeviceFilter(insts, license, dirFD); err != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go index e901f7a07b1..8917a6411d6 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/freezer.go @@ -1,19 +1,17 @@ -// +build linux - package fs2 import ( "bufio" 
- stdErrors "errors" + "errors" "fmt" "os" "strings" "time" + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) func setFreezer(dirPath string, state configs.FreezerState) error { @@ -26,7 +24,7 @@ func setFreezer(dirPath string, state configs.FreezerState) error { case configs.Thawed: stateStr = "0" default: - return errors.Errorf("invalid freezer state %q requested", state) + return fmt.Errorf("invalid freezer state %q requested", state) } fd, err := cgroups.OpenFile(dirPath, "cgroup.freeze", unix.O_RDWR) @@ -37,7 +35,7 @@ func setFreezer(dirPath string, state configs.FreezerState) error { if state != configs.Frozen { return nil } - return errors.Wrap(err, "freezer not supported") + return fmt.Errorf("freezer not supported: %w", err) } defer fd.Close() @@ -48,7 +46,7 @@ func setFreezer(dirPath string, state configs.FreezerState) error { if actualState, err := readFreezer(dirPath, fd); err != nil { return err } else if actualState != state { - return errors.Errorf(`expected "cgroup.freeze" to be in state %q but was in %q`, state, actualState) + return fmt.Errorf(`expected "cgroup.freeze" to be in state %q but was in %q`, state, actualState) } return nil } @@ -58,7 +56,7 @@ func getFreezer(dirPath string) (configs.FreezerState, error) { if err != nil { // If the kernel is too old, then we just treat the freezer as being in // an "undefined" state. - if os.IsNotExist(err) || stdErrors.Is(err, unix.ENODEV) { + if os.IsNotExist(err) || errors.Is(err, unix.ENODEV) { err = nil } return configs.Undefined, err @@ -82,7 +80,7 @@ func readFreezer(dirPath string, fd *os.File) (configs.FreezerState, error) { case "1\n": return waitFrozen(dirPath) default: - return configs.Undefined, errors.Errorf(`unknown "cgroup.freeze" state: %q`, state) + return configs.Undefined, fmt.Errorf(`unknown "cgroup.freeze" state: %q`, state) } } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go index afba0ab1c88..492778e3105 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/fs2.go @@ -1,8 +1,7 @@ -// +build linux - package fs2 import ( + "errors" "fmt" "os" "strings" @@ -10,9 +9,10 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" ) +type parseError = fscommon.ParseError + type manager struct { config *configs.Cgroup // dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope" @@ -20,16 +20,12 @@ type manager struct { // controllers is content of "cgroup.controllers" file. // excludes pseudo-controllers ("devices" and "freezer"). controllers map[string]struct{} - rootless bool } // NewManager creates a manager for cgroup v2 unified hierarchy. // dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope". // If dirPath is empty, it is automatically set using config. 
-func NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) { - if config == nil { - config = &configs.Cgroup{} - } +func NewManager(config *configs.Cgroup, dirPath string) (cgroups.Manager, error) { if dirPath == "" { var err error dirPath, err = defaultDirPath(config) @@ -39,9 +35,8 @@ func NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups. } m := &manager{ - config: config, - dirPath: dirPath, - rootless: rootless, + config: config, + dirPath: dirPath, } return m, nil } @@ -53,7 +48,7 @@ func (m *manager) getControllers() error { data, err := cgroups.ReadFile(m.dirPath, "cgroup.controllers") if err != nil { - if m.rootless && m.config.Path == "" { + if m.config.Rootless && m.config.Path == "" { return nil } return err @@ -73,12 +68,12 @@ func (m *manager) Apply(pid int) error { // - "runc create (no limits + no cgrouppath + no permission) succeeds" // - "runc create (rootless + no limits + cgrouppath + no permission) fails with permission error" // - "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error" - if m.rootless { + if m.config.Rootless { if m.config.Path == "" { if blNeed, nErr := needAnyControllers(m.config.Resources); nErr == nil && !blNeed { return nil } - return errors.Wrap(err, "rootless needs no limits + no cgrouppath when no permission is granted for cgroups") + return fmt.Errorf("rootless needs no limits + no cgrouppath when no permission is granted for cgroups: %w", err) } } return err @@ -123,13 +118,20 @@ func (m *manager) GetStats() (*cgroups.Stats, error) { if err := statHugeTlb(m.dirPath, st); err != nil && !os.IsNotExist(err) { errs = append(errs, err) } - if len(errs) > 0 && !m.rootless { - return st, errors.Errorf("error while statting cgroup v2: %+v", errs) + // rdma (since kernel 4.11) + if err := fscommon.RdmaGetStats(m.dirPath, st); err != nil && !os.IsNotExist(err) { + errs = append(errs, err) + } + if len(errs) > 0 && !m.config.Rootless { + return st, fmt.Errorf("error while statting cgroup v2: %+v", errs) } return st, nil } func (m *manager) Freeze(state configs.FreezerState) error { + if m.config.Resources == nil { + return errors.New("cannot toggle freezer: cgroups not configured for container") + } if err := setFreezer(m.dirPath, state); err != nil { return err } @@ -146,6 +148,9 @@ func (m *manager) Path(_ string) string { } func (m *manager) Set(r *configs.Resources) error { + if r == nil { + return nil + } if err := m.getControllers(); err != nil { return err } @@ -167,10 +172,10 @@ func (m *manager) Set(r *configs.Resources) error { } // devices (since kernel 4.15, pseudo-controller) // - // When m.rootless is true, errors from the device subsystem are ignored because it is really not expected to work. + // When rootless is true, errors from the device subsystem are ignored because it is really not expected to work. // However, errors from other subsystems are not ignored. 
// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error" - if err := setDevices(m.dirPath, r); err != nil && !m.rootless { + if err := setDevices(m.dirPath, r); err != nil && !m.config.Rootless { return err } // cpuset (since kernel 5.0) @@ -181,6 +186,10 @@ func (m *manager) Set(r *configs.Resources) error { if err := setHugeTlb(m.dirPath, r); err != nil { return err } + // rdma (since kernel 4.11) + if err := fscommon.RdmaSet(m.dirPath, r); err != nil { + return err + } // freezer (since kernel 5.2, pseudo-controller) if err := setFreezer(m.dirPath, r.Freezer); err != nil { return err @@ -198,9 +207,8 @@ func (m *manager) setUnified(res map[string]string) error { return fmt.Errorf("unified resource %q must be a file name (no slashes)", k) } if err := cgroups.WriteFile(m.dirPath, k, v); err != nil { - errC := errors.Cause(err) // Check for both EPERM and ENOENT since O_CREAT is used by WriteFile. - if errors.Is(errC, os.ErrPermission) || errors.Is(errC, os.ErrNotExist) { + if errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrNotExist) { // Check if a controller is available, // to give more specific error if not. sk := strings.SplitN(k, ".", 2) @@ -212,7 +220,7 @@ func (m *manager) setUnified(res map[string]string) error { return fmt.Errorf("unified resource %q can't be set: controller %q not available", k, c) } } - return errors.Wrapf(err, "can't set unified resource %q", k) + return fmt.Errorf("unable to set unified resource %q: %w", k, err) } } @@ -243,7 +251,7 @@ func OOMKillCount(path string) (uint64, error) { func (m *manager) OOMKillCount() (uint64, error) { c, err := OOMKillCount(m.dirPath) - if err != nil && m.rootless && os.IsNotExist(err) { + if err != nil && m.config.Rootless && os.IsNotExist(err) { err = nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go index 96390a224f1..c92a7e64af0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/hugetlb.go @@ -1,12 +1,8 @@ -// +build linux - package fs2 import ( "strconv" - "github.com/pkg/errors" - "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" @@ -30,10 +26,8 @@ func setHugeTlb(dirPath string, r *configs.Resources) error { } func statHugeTlb(dirPath string, stats *cgroups.Stats) error { - hugePageSizes, _ := cgroups.GetHugePageSize() hugetlbStats := cgroups.HugetlbStats{} - - for _, pagesize := range hugePageSizes { + for _, pagesize := range cgroups.HugePageSizes() { value, err := fscommon.GetCgroupParamUint(dirPath, "hugetlb."+pagesize+".current") if err != nil { return err @@ -43,7 +37,7 @@ func statHugeTlb(dirPath string, stats *cgroups.Stats) error { fileName := "hugetlb." 
+ pagesize + ".events" value, err = fscommon.GetValueByKey(dirPath, fileName, "max") if err != nil { - return errors.Wrap(err, "failed to read stats") + return err } hugetlbStats.Failcnt = value diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go index fd3f0993ea0..b2ff7d34086 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/io.go @@ -1,5 +1,3 @@ -// +build linux - package fs2 import ( @@ -117,13 +115,14 @@ func readCgroup2MapFile(dirPath string, name string) (map[string][]string, error ret[parts[0]] = parts[1:] } if err := scanner.Err(); err != nil { - return nil, err + return nil, &parseError{Path: dirPath, File: name, Err: err} } return ret, nil } func statIo(dirPath string, stats *cgroups.Stats) error { - values, err := readCgroup2MapFile(dirPath, "io.stat") + const file = "io.stat" + values, err := readCgroup2MapFile(dirPath, file) if err != nil { return err } @@ -136,11 +135,11 @@ func statIo(dirPath string, stats *cgroups.Stats) error { } major, err := strconv.ParseUint(d[0], 10, 64) if err != nil { - return err + return &parseError{Path: dirPath, File: file, Err: err} } minor, err := strconv.ParseUint(d[1], 10, 64) if err != nil { - return err + return &parseError{Path: dirPath, File: file, Err: err} } for _, item := range v { @@ -177,7 +176,7 @@ func statIo(dirPath string, stats *cgroups.Stats) error { value, err := strconv.ParseUint(d[1], 10, 64) if err != nil { - return err + return &parseError{Path: dirPath, File: file, Err: err} } entry := cgroups.BlkioStatEntry{ diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go index 53e8f1e9349..adbc4b23086 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go @@ -1,19 +1,18 @@ -// +build linux - package fs2 import ( "bufio" + "errors" "math" "os" "strconv" "strings" + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) // numToStr converts an int64 value to a string for writing to a @@ -75,8 +74,8 @@ func setMemory(dirPath string, r *configs.Resources) error { } func statMemory(dirPath string, stats *cgroups.Stats) error { - // Set stats from memory.stat. - statsFile, err := cgroups.OpenFile(dirPath, "memory.stat", os.O_RDONLY) + const file = "memory.stat" + statsFile, err := cgroups.OpenFile(dirPath, file, os.O_RDONLY) if err != nil { return err } @@ -86,10 +85,13 @@ func statMemory(dirPath string, stats *cgroups.Stats) error { for sc.Scan() { t, v, err := fscommon.ParseKeyValue(sc.Text()) if err != nil { - return errors.Wrapf(err, "failed to parse memory.stat (%q)", sc.Text()) + return &parseError{Path: dirPath, File: file, Err: err} } stats.MemoryStats.Stats[t] = v } + if err := sc.Err(); err != nil { + return &parseError{Path: dirPath, File: file, Err: err} + } stats.MemoryStats.Cache = stats.MemoryStats.Stats["file"] // Unlike cgroup v1 which has memory.use_hierarchy binary knob, // cgroup v2 is always hierarchical. 
@@ -139,13 +141,13 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { // swapaccount=0 kernel boot parameter is given. return cgroups.MemoryData{}, nil } - return cgroups.MemoryData{}, errors.Wrapf(err, "failed to parse %s", usage) + return cgroups.MemoryData{}, err } memoryData.Usage = value value, err = fscommon.GetCgroupParamUint(path, limit) if err != nil { - return cgroups.MemoryData{}, errors.Wrapf(err, "failed to parse %s", limit) + return cgroups.MemoryData{}, err } memoryData.Limit = value @@ -153,7 +155,8 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { } func statsFromMeminfo(stats *cgroups.Stats) error { - f, err := os.Open("/proc/meminfo") + const file = "/proc/meminfo" + f, err := os.Open(file) if err != nil { return err } @@ -190,7 +193,7 @@ func statsFromMeminfo(stats *cgroups.Stats) error { vStr := strings.TrimSpace(strings.TrimSuffix(parts[1], " kB")) *p, err = strconv.ParseUint(vStr, 10, 64) if err != nil { - return errors.Wrap(err, "parsing /proc/meminfo "+k) + return &parseError{File: file, Err: errors.New("bad value for " + k)} } found++ @@ -199,8 +202,8 @@ func statsFromMeminfo(stats *cgroups.Stats) error { break } } - if sc.Err() != nil { - return sc.Err() + if err := sc.Err(); err != nil { + return &parseError{Path: "", File: file, Err: err} } stats.MemoryStats.SwapUsage.Usage = (swap_total - swap_free) * 1024 diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go index e2050002d0e..c8c4a365894 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/pids.go @@ -1,17 +1,16 @@ -// +build linux - package fs2 import ( + "errors" + "math" "os" - "path/filepath" "strings" + "golang.org/x/sys/unix" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fscommon" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" - "golang.org/x/sys/unix" ) func isPidsSet(r *configs.Resources) bool { @@ -53,22 +52,18 @@ func statPids(dirPath string, stats *cgroups.Stats) error { if os.IsNotExist(err) { return statPidsFromCgroupProcs(dirPath, stats) } - return errors.Wrap(err, "failed to parse pids.current") + return err } - maxString, err := fscommon.GetCgroupParamString(dirPath, "pids.max") + max, err := fscommon.GetCgroupParamUint(dirPath, "pids.max") if err != nil { - return errors.Wrap(err, "failed to parse pids.max") + return err } - - // Default if pids.max == "max" is 0 -- which represents "no limit". - var max uint64 - if maxString != "max" { - max, err = fscommon.ParseUint(maxString, 10, 64) - if err != nil { - return errors.Wrapf(err, "failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", - maxString, filepath.Join(dirPath, "pids.max")) - } + // If no limit is set, read from pids.max returns "max", which is + // converted to MaxUint64 by GetCgroupParamUint. Historically, we + // represent "no limit" for pids as 0, thus this conversion. 
+ if max == math.MaxUint64 { + max = 0 } stats.PidsStats.Current = current diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go new file mode 100644 index 00000000000..d463d15ee4e --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/rdma.go @@ -0,0 +1,121 @@ +package fscommon + +import ( + "bufio" + "errors" + "math" + "os" + "strconv" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/unix" +) + +// parseRdmaKV parses raw string to RdmaEntry. +func parseRdmaKV(raw string, entry *cgroups.RdmaEntry) error { + var value uint32 + + parts := strings.SplitN(raw, "=", 3) + + if len(parts) != 2 { + return errors.New("Unable to parse RDMA entry") + } + + k, v := parts[0], parts[1] + + if v == "max" { + value = math.MaxUint32 + } else { + val64, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return err + } + value = uint32(val64) + } + if k == "hca_handle" { + entry.HcaHandles = value + } else if k == "hca_object" { + entry.HcaObjects = value + } + + return nil +} + +// readRdmaEntries reads and converts array of rawstrings to RdmaEntries from file. +// example entry: mlx4_0 hca_handle=2 hca_object=2000 +func readRdmaEntries(dir, file string) ([]cgroups.RdmaEntry, error) { + rdmaEntries := make([]cgroups.RdmaEntry, 0) + fd, err := cgroups.OpenFile(dir, file, unix.O_RDONLY) + if err != nil { + return nil, err + } + defer fd.Close() //nolint:errorlint + scanner := bufio.NewScanner(fd) + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), " ", 4) + if len(parts) == 3 { + entry := new(cgroups.RdmaEntry) + entry.Device = parts[0] + err = parseRdmaKV(parts[1], entry) + if err != nil { + continue + } + err = parseRdmaKV(parts[2], entry) + if err != nil { + continue + } + + rdmaEntries = append(rdmaEntries, *entry) + } + } + return rdmaEntries, scanner.Err() +} + +// RdmaGetStats returns rdma stats such as totalLimit and current entries. +func RdmaGetStats(path string, stats *cgroups.Stats) error { + currentEntries, err := readRdmaEntries(path, "rdma.current") + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err = nil + } + return err + } + maxEntries, err := readRdmaEntries(path, "rdma.max") + if err != nil { + return err + } + // If device got removed between reading two files, ignore returning stats. + if len(currentEntries) != len(maxEntries) { + return nil + } + + stats.RdmaStats = cgroups.RdmaStats{ + RdmaLimit: maxEntries, + RdmaCurrent: currentEntries, + } + + return nil +} + +func createCmdString(device string, limits configs.LinuxRdma) string { + cmdString := device + if limits.HcaHandles != nil { + cmdString += " hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10) + } + if limits.HcaObjects != nil { + cmdString += " hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10) + } + return cmdString +} + +// RdmaSet sets RDMA resources. 
+func RdmaSet(path string, r *configs.Resources) error { + for device, limits := range r.Rdma { + if err := cgroups.WriteFile(path, "rdma.max", createCmdString(device, limits)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go index e31146ae9df..f4a51c9e56f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go @@ -1,11 +1,10 @@ -// +build linux - package fscommon import ( "errors" "fmt" "math" + "path" "strconv" "strings" @@ -13,8 +12,6 @@ import ( ) var ( - ErrNotValidFormat = errors.New("line is not a valid key value format") - // Deprecated: use cgroups.OpenFile instead. OpenFile = cgroups.OpenFile // Deprecated: use cgroups.ReadFile instead. @@ -23,6 +20,19 @@ var ( WriteFile = cgroups.WriteFile ) +// ParseError records a parse error details, including the file path. +type ParseError struct { + Path string + File string + Err error +} + +func (e *ParseError) Error() string { + return "unable to parse " + path.Join(e.Path, e.File) + ": " + e.Err.Error() +} + +func (e *ParseError) Unwrap() error { return e.Err } + // ParseUint converts a string to an uint64 integer. // Negative values are returned at zero as, due to kernel bugs, // some of the memory cgroup stats can be negative. @@ -34,7 +44,7 @@ func ParseUint(s string, base, bitSize int) (uint64, error) { // 2. Handle negative values lesser than MinInt64 if intErr == nil && intValue < 0 { return 0, nil - } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { + } else if errors.Is(intErr, strconv.ErrRange) && intValue < 0 { return 0, nil } @@ -56,7 +66,7 @@ func ParseKeyValue(t string) (string, uint64, error) { value, err := ParseUint(parts[1], 10, 64) if err != nil { - return "", 0, fmt.Errorf("unable to convert to uint64: %v", err) + return "", 0, err } return parts[0], value, nil @@ -71,11 +81,15 @@ func GetValueByKey(path, file, key string) (uint64, error) { return 0, err } - lines := strings.Split(string(content), "\n") + lines := strings.Split(content, "\n") for _, line := range lines { arr := strings.Split(line, " ") if len(arr) == 2 && arr[0] == key { - return ParseUint(arr[1], 10, 64) + val, err := ParseUint(arr[1], 10, 64) + if err != nil { + err = &ParseError{Path: path, File: file, Err: err} + } + return val, err } } @@ -96,7 +110,7 @@ func GetCgroupParamUint(path, file string) (uint64, error) { res, err := ParseUint(contents, 10, 64) if err != nil { - return res, fmt.Errorf("unable to parse file %q", path+"/"+file) + return res, &ParseError{Path: path, File: file, Err: err} } return res, nil } @@ -115,7 +129,7 @@ func GetCgroupParamInt(path, file string) (int64, error) { res, err := strconv.ParseInt(contents, 10, 64) if err != nil { - return res, fmt.Errorf("unable to parse %q as a int from Cgroup file %q", contents, path+"/"+file) + return res, &ParseError{Path: path, File: file, Err: err} } return res, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go new file mode 100644 index 00000000000..1355a510105 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go @@ -0,0 +1,27 @@ +package cgroups + +import ( + "io/fs" + "path/filepath" +) + +// 
GetAllPids returns all pids from the cgroup identified by path, and all its +// sub-cgroups. +func GetAllPids(path string) ([]int, error) { + var pids []int + err := filepath.WalkDir(path, func(p string, d fs.DirEntry, iErr error) error { + if iErr != nil { + return iErr + } + if !d.IsDir() { + return nil + } + cPids, err := readProcsFile(p) + if err != nil { + return err + } + pids = append(pids, cPids...) + return nil + }) + return pids, err +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go index e7f9c462635..40a81dd5a86 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -1,5 +1,3 @@ -// +build linux - package cgroups type ThrottlingData struct { @@ -126,7 +124,7 @@ type BlkioStatEntry struct { } type BlkioStats struct { - // number of bytes tranferred to and from the block device + // number of bytes transferred to and from the block device IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` @@ -146,6 +144,17 @@ type HugetlbStats struct { Failcnt uint64 `json:"failcnt"` } +type RdmaEntry struct { + Device string `json:"device,omitempty"` + HcaHandles uint32 `json:"hca_handles,omitempty"` + HcaObjects uint32 `json:"hca_objects,omitempty"` +} + +type RdmaStats struct { + RdmaLimit []RdmaEntry `json:"rdma_limit,omitempty"` + RdmaCurrent []RdmaEntry `json:"rdma_current,omitempty"` +} + type Stats struct { CpuStats CpuStats `json:"cpu_stats,omitempty"` CPUSetStats CPUSetStats `json:"cpuset_stats,omitempty"` @@ -154,6 +163,7 @@ type Stats struct { BlkioStats BlkioStats `json:"blkio_stats,omitempty"` // the map is in the format "size of hugepage: stats of the hugepage" HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"` + RdmaStats RdmaStats `json:"rdma_stats,omitempty"` } func NewStats() *Stats { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go index 06c6c2e95a6..98ccc51655c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go @@ -3,6 +3,7 @@ package systemd import ( "bufio" "context" + "errors" "fmt" "math" "os" @@ -14,11 +15,11 @@ import ( systemdDbus "github.com/coreos/go-systemd/v22/dbus" dbus "github.com/godbus/dbus/v5" + "github.com/sirupsen/logrus" + cgroupdevices "github.com/opencontainers/runc/libcontainer/cgroups/devices" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/devices" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const ( @@ -92,7 +93,7 @@ func groupPrefix(ruleType devices.Type) (string, error) { case devices.CharDevice: return "char-", nil default: - return "", errors.Errorf("device type %v has no group prefix", ruleType) + return "", fmt.Errorf("device type %v has no group prefix", ruleType) } } @@ -142,9 +143,9 @@ func findDeviceGroup(ruleType devices.Type, ruleMajor int64) (string, error) { ) if n, err := fmt.Sscanf(line, "%d %s", &currMajor, &currName); err != nil || n != 2 { if err == nil { - err = errors.Errorf("wrong number of fields") + err = errors.New("wrong 
number of fields") } - return "", errors.Wrapf(err, "scan /proc/devices line %q", line) + return "", fmt.Errorf("scan /proc/devices line %q: %w", line, err) } if currMajor == ruleMajor { @@ -152,7 +153,7 @@ func findDeviceGroup(ruleType devices.Type, ruleMajor int64) (string, error) { } } if err := scanner.Err(); err != nil { - return "", errors.Wrap(err, "reading /proc/devices") + return "", fmt.Errorf("reading /proc/devices: %w", err) } // Couldn't find the device group. return "", nil @@ -192,12 +193,12 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err configEmu := &cgroupdevices.Emulator{} for _, rule := range r.Devices { if err := configEmu.Apply(*rule); err != nil { - return nil, errors.Wrap(err, "apply rule for systemd") + return nil, fmt.Errorf("unable to apply rule for systemd: %w", err) } } // systemd doesn't support blacklists. So we log a warning, and tell // systemd to act as a deny-all whitelist. This ruleset will be replaced - // with our normal fallback code. This may result in spurrious errors, but + // with our normal fallback code. This may result in spurious errors, but // the only other option is to error out here. if configEmu.IsBlacklist() { // However, if we're dealing with an allow-all rule then we can do it. @@ -213,19 +214,19 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err // whitelist which is the default for devices.Emulator. finalRules, err := configEmu.Rules() if err != nil { - return nil, errors.Wrap(err, "get simplified rules for systemd") + return nil, fmt.Errorf("unable to get simplified rules for systemd: %w", err) } var deviceAllowList []deviceAllowEntry for _, rule := range finalRules { if !rule.Allow { // Should never happen. - return nil, errors.Errorf("[internal error] cannot add deny rule to systemd DeviceAllow list: %v", *rule) + return nil, fmt.Errorf("[internal error] cannot add deny rule to systemd DeviceAllow list: %v", *rule) } switch rule.Type { case devices.BlockDevice, devices.CharDevice: default: // Should never happen. - return nil, errors.Errorf("invalid device type for DeviceAllow: %v", rule.Type) + return nil, fmt.Errorf("invalid device type for DeviceAllow: %v", rule.Type) } entry := deviceAllowEntry{ @@ -271,7 +272,7 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err // "_ n:* _" rules require a device group from /proc/devices. group, err := findDeviceGroup(rule.Type, rule.Major) if err != nil { - return nil, errors.Wrapf(err, "find device '%v/%d'", rule.Type, rule.Major) + return nil, fmt.Errorf("unable to find device '%v/%d': %w", rule.Type, rule.Major, err) } if group == "" { // Couldn't find a group. 
@@ -350,7 +351,7 @@ func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Pr // Please refer to https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit if s != "done" { resetFailedUnit(cm, unitName) - return errors.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s) + return fmt.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s) } case <-timeout.C: resetFailedUnit(cm, unitName) @@ -449,10 +450,13 @@ func systemdVersionAtoi(verStr string) (int, error) { re := regexp.MustCompile(`v?([0-9]+)`) matches := re.FindStringSubmatch(verStr) if len(matches) < 2 { - return 0, errors.Errorf("can't parse version %s: incorrect number of matches %v", verStr, matches) + return 0, fmt.Errorf("can't parse version %s: incorrect number of matches %v", verStr, matches) } ver, err := strconv.Atoi(matches[1]) - return ver, errors.Wrapf(err, "can't parse version %s", verStr) + if err != nil { + return -1, fmt.Errorf("can't parse version: %w", err) + } + return ver, nil } func addCpuQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota int64, period uint64) { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go index 642a04e7dd9..83d10dd705f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go @@ -1,12 +1,10 @@ package systemd import ( - "encoding/binary" + "errors" + "math/big" "strconv" "strings" - - "github.com/bits-and-blooms/bitset" - "github.com/pkg/errors" ) // RangeToBits converts a text representation of a CPU mask (as written to @@ -14,7 +12,7 @@ import ( // with the corresponding bits set (as consumed by systemd over dbus as // AllowedCPUs/AllowedMemoryNodes unit property value). 
func RangeToBits(str string) ([]byte, error) { - bits := &bitset.BitSet{} + bits := new(big.Int) for _, r := range strings.Split(str, ",") { // allow extra spaces around @@ -36,32 +34,22 @@ func RangeToBits(str string) ([]byte, error) { if start > end { return nil, errors.New("invalid range: " + r) } - for i := uint(start); i <= uint(end); i++ { - bits.Set(i) + for i := start; i <= end; i++ { + bits.SetBit(bits, int(i), 1) } } else { val, err := strconv.ParseUint(ranges[0], 10, 32) if err != nil { return nil, err } - bits.Set(uint(val)) + bits.SetBit(bits, int(val), 1) } } - val := bits.Bytes() - if len(val) == 0 { + ret := bits.Bytes() + if len(ret) == 0 { // do not allow empty values return nil, errors.New("empty value") } - ret := make([]byte, len(val)*8) - for i := range val { - // bitset uses BigEndian internally - binary.BigEndian.PutUint64(ret[i*8:], val[len(val)-1-i]) - } - // remove upper all-zero bytes - for ret[0] == 0 { - ret = ret[1:] - } - return ret, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go index 06905645561..3e547e282b5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/dbus.go @@ -1,5 +1,3 @@ -// +build linux - package systemd import ( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unsupported.go deleted file mode 100644 index cb4d00c88b3..00000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/unsupported.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build !linux - -package systemd - -import ( - "errors" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type Manager struct { - Cgroups *configs.Cgroup - Paths map[string]string -} - -func IsRunningSystemd() bool { - return false -} - -func NewSystemdCgroupsManager() (func(config *configs.Cgroup, paths map[string]string) cgroups.Manager, error) { - return nil, errors.New("Systemd not supported") -} - -func (m *Manager) Apply(pid int) error { - return errors.New("Systemd not supported") -} - -func (m *Manager) GetPids() ([]int, error) { - return nil, errors.New("Systemd not supported") -} - -func (m *Manager) GetAllPids() ([]int, error) { - return nil, errors.New("Systemd not supported") -} - -func (m *Manager) Destroy() error { - return errors.New("Systemd not supported") -} - -func (m *Manager) GetPaths() map[string]string { - return nil -} - -func (m *Manager) Path(_ string) string { - return "" -} - -func (m *Manager) GetStats() (*cgroups.Stats, error) { - return nil, errors.New("Systemd not supported") -} - -func (m *Manager) Set(container *configs.Config) error { - return errors.New("Systemd not supported") -} - -func (m *Manager) Freeze(state configs.FreezerState) error { - return errors.New("Systemd not supported") -} - -func Freeze(c *configs.Cgroup, state configs.FreezerState) error { - return errors.New("Systemd not supported") -} - -func (m *Manager) GetCgroups() (*configs.Cgroup, error) { - return nil, errors.New("Systemd not supported") -} - -func (m *Manager) Exists() bool { - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/user.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/user.go index 
ddeaf664263..0f50f76eee4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/user.go @@ -1,10 +1,10 @@ -// +build linux - package systemd import ( "bufio" "bytes" + "errors" + "fmt" "os" "os/exec" "path/filepath" @@ -13,8 +13,8 @@ import ( systemdDbus "github.com/coreos/go-systemd/v22/dbus" dbus "github.com/godbus/dbus/v5" + "github.com/opencontainers/runc/libcontainer/userns" - "github.com/pkg/errors" ) // newUserSystemdDbus creates a connection for systemd user-instance. @@ -31,17 +31,17 @@ func newUserSystemdDbus() (*systemdDbus.Conn, error) { return systemdDbus.NewConnection(func() (*dbus.Conn, error) { conn, err := dbus.Dial(addr) if err != nil { - return nil, errors.Wrapf(err, "error while dialing %q", addr) + return nil, fmt.Errorf("error while dialing %q: %w", addr, err) } methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))} err = conn.Auth(methods) if err != nil { conn.Close() - return nil, errors.Wrapf(err, "error while authenticating connection, address=%q, UID=%d", addr, uid) + return nil, fmt.Errorf("error while authenticating connection (address=%q, UID=%d): %w", addr, uid, err) } if err = conn.Hello(); err != nil { conn.Close() - return nil, errors.Wrapf(err, "error while sending Hello message, address=%q, UID=%d", addr, uid) + return nil, fmt.Errorf("error while sending Hello message (address=%q, UID=%d): %w", addr, uid, err) } return conn, nil }) @@ -57,7 +57,7 @@ func DetectUID() (int, error) { } b, err := exec.Command("busctl", "--user", "--no-pager", "status").CombinedOutput() if err != nil { - return -1, errors.Wrapf(err, "could not execute `busctl --user --no-pager status`: %q", string(b)) + return -1, fmt.Errorf("could not execute `busctl --user --no-pager status` (output: %q): %w", string(b), err) } scanner := bufio.NewScanner(bytes.NewReader(b)) for scanner.Scan() { @@ -66,7 +66,7 @@ func DetectUID() (int, error) { uidStr := strings.TrimPrefix(s, "OwnerUID=") i, err := strconv.Atoi(uidStr) if err != nil { - return -1, errors.Wrapf(err, "could not detect the OwnerUID: %s", s) + return -1, fmt.Errorf("could not detect the OwnerUID: %w", err) } return i, nil } @@ -93,7 +93,7 @@ func DetectUserDbusSessionBusAddress() (string, error) { } b, err := exec.Command("systemctl", "--user", "--no-pager", "show-environment").CombinedOutput() if err != nil { - return "", errors.Wrapf(err, "could not execute `systemctl --user --no-pager show-environment`, output=%q", string(b)) + return "", fmt.Errorf("could not execute `systemctl --user --no-pager show-environment` (output=%q): %w", string(b), err) } scanner := bufio.NewScanner(bytes.NewReader(b)) for scanner.Scan() { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go index cd4720c5d44..a74a05a5cd0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go @@ -1,5 +1,3 @@ -// +build linux - package systemd import ( @@ -26,12 +24,25 @@ type legacyManager struct { dbus *dbusConnManager } -func NewLegacyManager(cg *configs.Cgroup, paths map[string]string) cgroups.Manager { +func NewLegacyManager(cg *configs.Cgroup, paths map[string]string) (cgroups.Manager, error) { + if cg.Rootless { + return nil, errors.New("cannot use rootless systemd cgroups manager on cgroup v1") + } + if cg.Resources != nil && 
cg.Resources.Unified != nil { + return nil, cgroups.ErrV1NoUnified + } + if paths == nil { + var err error + paths, err = initPaths(cg) + if err != nil { + return nil, err + } + } return &legacyManager{ cgroups: cg, paths: paths, dbus: newDbusConnManager(false), - } + }, nil } type subsystem interface { @@ -59,6 +70,7 @@ var legacySubsystems = []subsystem{ &fs.NetPrioGroup{}, &fs.NetClsGroup{}, &fs.NameGroup{GroupName: "name=systemd"}, + &fs.RdmaGroup{}, } func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) { @@ -100,6 +112,53 @@ func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]syst return properties, nil } +// initPaths figures out and returns paths to cgroups. +func initPaths(c *configs.Cgroup) (map[string]string, error) { + slice := "system.slice" + if c.Parent != "" { + var err error + slice, err = ExpandSlice(c.Parent) + if err != nil { + return nil, err + } + } + + unit := getUnitName(c) + + paths := make(map[string]string) + for _, s := range legacySubsystems { + subsystemPath, err := getSubsystemPath(slice, unit, s.Name()) + if err != nil { + // Even if it's `not found` error, we'll return err + // because devices cgroup is hard requirement for + // container security. + if s.Name() == "devices" { + return nil, err + } + // Don't fail if a cgroup hierarchy was not found, just skip this subsystem + if cgroups.IsNotFound(err) { + continue + } + return nil, err + } + paths[s.Name()] = subsystemPath + } + + // If systemd is using cgroups-hybrid mode then add the slice path of + // this container to the paths so the following process executed with + // "runc exec" joins that cgroup as well. + if cgroups.IsCgroup2HybridMode() { + // "" means cgroup-hybrid path + cgroupsHybridPath, err := getSubsystemPath(slice, unit, "") + if err != nil && cgroups.IsNotFound(err) { + return nil, err + } + paths[""] = cgroupsHybridPath + } + + return paths, nil +} + func (m *legacyManager) Apply(pid int) error { var ( c = m.cgroups @@ -108,27 +167,8 @@ func (m *legacyManager) Apply(pid int) error { properties []systemdDbus.Property ) - if c.Resources.Unified != nil { - return cgroups.ErrV1NoUnified - } - m.mu.Lock() defer m.mu.Unlock() - if c.Paths != nil { - paths := make(map[string]string) - cgMap, err := cgroups.ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return err - } - // XXX(kolyshkin@): why this check is needed? - for name, path := range c.Paths { - if _, ok := cgMap[name]; ok { - paths[name] = path - } - } - m.paths = paths - return cgroups.EnterPid(m.paths, pid) - } if c.Parent != "" { slice = c.Parent @@ -136,12 +176,14 @@ func (m *legacyManager) Apply(pid int) error { properties = append(properties, systemdDbus.PropDescription("libcontainer container "+c.Name)) - // if we create a slice, the parent is defined via a Wants= if strings.HasSuffix(unitName, ".slice") { + // If we create a slice, the parent is defined via a Wants=. properties = append(properties, systemdDbus.PropWants(slice)) } else { - // otherwise, we use Slice= + // Otherwise it's a scope, which we put into a Slice=. properties = append(properties, systemdDbus.PropSlice(slice)) + // Assume scopes always support delegation (supported since systemd v218). + properties = append(properties, newProp("Delegate", true)) } // only add pid if its valid, -1 is used w/ general slice creation. 
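With the change above, NewLegacyManager validates its config and resolves the per-subsystem paths eagerly, so call sites must now handle an error. A hypothetical caller sketch with made-up cgroup values; Name and Parent are standard configs.Cgroup fields that this hunk does not show:

package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	cg := &configs.Cgroup{
		Name:      "demo",         // hypothetical unit name
		Parent:    "system.slice", // hypothetical parent slice
		Resources: &configs.Resources{},
	}

	// Before this change: mgr := systemd.NewLegacyManager(cg, nil)
	// After: the error must be checked; rootless mode or Unified resources on
	// cgroup v1 are rejected here instead of failing later in Apply.
	mgr, err := systemd.NewLegacyManager(cg, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = mgr
}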
@@ -149,12 +191,6 @@ func (m *legacyManager) Apply(pid int) error { properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) } - // Check if we can delegate. This is only supported on systemd versions 218 and above. - if !strings.HasSuffix(unitName, ".slice") { - // Assume scopes always support delegation. - properties = append(properties, newProp("Delegate", true)) - } - // Always enable accounting, this gets us the same behaviour as the fs implementation, // plus the kernel has some problems with joining the memory cgroup at a later time. properties = append(properties, @@ -174,26 +210,6 @@ func (m *legacyManager) Apply(pid int) error { return err } - paths := make(map[string]string) - for _, s := range legacySubsystems { - subsystemPath, err := getSubsystemPath(m.cgroups, s.Name()) - if err != nil { - // Even if it's `not found` error, we'll return err - // because devices cgroup is hard requirement for - // container security. - if s.Name() == "devices" { - return err - } - // Don't fail if a cgroup hierarchy was not found, just skip this subsystem - if cgroups.IsNotFound(err) { - continue - } - return err - } - paths[s.Name()] = subsystemPath - } - m.paths = paths - if err := m.joinCgroups(pid); err != nil { return err } @@ -202,9 +218,6 @@ func (m *legacyManager) Apply(pid int) error { } func (m *legacyManager) Destroy() error { - if m.cgroups.Paths != nil { - return nil - } m.mu.Lock() defer m.mu.Unlock() @@ -254,7 +267,7 @@ func (m *legacyManager) joinCgroups(pid int) error { return nil } -func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) { +func getSubsystemPath(slice, unit, subsystem string) (string, error) { mountpoint, err := cgroups.FindCgroupMountpoint("", subsystem) if err != nil { return "", err @@ -267,17 +280,7 @@ func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) { // if pid 1 is systemd 226 or later, it will be in init.scope, not the root initPath = strings.TrimSuffix(filepath.Clean(initPath), "init.scope") - slice := "system.slice" - if c.Parent != "" { - slice = c.Parent - } - - slice, err = ExpandSlice(slice) - if err != nil { - return "", err - } - - return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil + return filepath.Join(mountpoint, initPath, slice, unit), nil } func (m *legacyManager) Freeze(state configs.FreezerState) error { @@ -399,9 +402,7 @@ func (m *legacyManager) freezeBeforeSet(unitName string, r *configs.Resources) ( } func (m *legacyManager) Set(r *configs.Resources) error { - // If Paths are set, then we are just joining cgroups paths - // and there is no need to set any values. 
- if m.cgroups.Paths != nil { + if r == nil { return nil } if r.Unified != nil { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go index 55273b722c1..c31f0ecfd25 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go @@ -1,10 +1,10 @@ -// +build linux - package systemd import ( + "bufio" "fmt" "math" + "os" "path/filepath" "strconv" "strings" @@ -12,29 +12,39 @@ import ( systemdDbus "github.com/coreos/go-systemd/v22/dbus" securejoin "github.com/cyphar/filepath-securejoin" + "github.com/sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs2" "github.com/opencontainers/runc/libcontainer/configs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type unifiedManager struct { mu sync.Mutex cgroups *configs.Cgroup // path is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope" - path string - rootless bool - dbus *dbusConnManager + path string + dbus *dbusConnManager + fsMgr cgroups.Manager } -func NewUnifiedManager(config *configs.Cgroup, path string, rootless bool) cgroups.Manager { - return &unifiedManager{ - cgroups: config, - path: path, - rootless: rootless, - dbus: newDbusConnManager(rootless), +func NewUnifiedManager(config *configs.Cgroup, path string) (cgroups.Manager, error) { + m := &unifiedManager{ + cgroups: config, + path: path, + dbus: newDbusConnManager(config.Rootless), + } + if err := m.initPath(); err != nil { + return nil, err + } + + fsMgr, err := fs2.NewManager(config, m.path) + if err != nil { + return nil, err } + m.fsMgr = fsMgr + + return m, nil } // unifiedResToSystemdProps tries to convert from Cgroup.Resources.Unified @@ -233,12 +243,8 @@ func (m *unifiedManager) Apply(pid int) error { properties []systemdDbus.Property ) - if c.Paths != nil { - return cgroups.WriteCgroupProc(m.path, pid) - } - slice := "system.slice" - if m.rootless { + if m.cgroups.Rootless { slice = "user.slice" } if c.Parent != "" { @@ -247,12 +253,14 @@ func (m *unifiedManager) Apply(pid int) error { properties = append(properties, systemdDbus.PropDescription("libcontainer container "+c.Name)) - // if we create a slice, the parent is defined via a Wants= if strings.HasSuffix(unitName, ".slice") { + // If we create a slice, the parent is defined via a Wants=. properties = append(properties, systemdDbus.PropWants(slice)) } else { - // otherwise, we use Slice= + // Otherwise it's a scope, which we put into a Slice=. properties = append(properties, systemdDbus.PropSlice(slice)) + // Assume scopes always support delegation (supported since systemd v218). + properties = append(properties, newProp("Delegate", true)) } // only add pid if its valid, -1 is used w/ general slice creation. @@ -260,12 +268,6 @@ func (m *unifiedManager) Apply(pid int) error { properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) } - // Check if we can delegate. This is only supported on systemd versions 218 and above. - if !strings.HasSuffix(unitName, ".slice") { - // Assume scopes always support delegation. - properties = append(properties, newProp("Delegate", true)) - } - // Always enable accounting, this gets us the same behaviour as the fs implementation, // plus the kernel has some problems with joining the memory cgroup at a later time. 
properties = append(properties, @@ -282,22 +284,53 @@ func (m *unifiedManager) Apply(pid int) error { properties = append(properties, c.SystemdProps...) if err := startUnit(m.dbus, unitName, properties); err != nil { - return errors.Wrapf(err, "error while starting unit %q with properties %+v", unitName, properties) + return fmt.Errorf("unable to start unit %q (properties %+v): %w", unitName, properties, err) } - if err := m.initPath(); err != nil { - return err - } if err := fs2.CreateCgroupPath(m.path, m.cgroups); err != nil { return err } + + if c.OwnerUID != nil { + filesToChown, err := cgroupFilesToChown() + if err != nil { + return err + } + + for _, v := range filesToChown { + err := os.Chown(m.path+"/"+v, *c.OwnerUID, -1) + if err != nil { + return err + } + } + } + return nil } -func (m *unifiedManager) Destroy() error { - if m.cgroups.Paths != nil { - return nil +// The kernel exposes a list of files that should be chowned to the delegate +// uid in /sys/kernel/cgroup/delegate. If the file is not present +// (Linux < 4.15), use the initial values mentioned in cgroups(7). +func cgroupFilesToChown() ([]string, error) { + filesToChown := []string{"."} // the directory itself must be chowned + const cgroupDelegateFile = "/sys/kernel/cgroup/delegate" + f, err := os.Open(cgroupDelegateFile) + if err == nil { + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + filesToChown = append(filesToChown, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading %s: %w", cgroupDelegateFile, err) + } + } else { + filesToChown = append(filesToChown, "cgroup.procs", "cgroup.subtree_control", "cgroup.threads") } + return filesToChown, nil +} + +func (m *unifiedManager) Destroy() error { m.mu.Lock() defer m.mu.Unlock() @@ -307,8 +340,8 @@ func (m *unifiedManager) Destroy() error { } // systemd 239 do not remove sub-cgroups. - err := cgroups.RemovePath(m.path) - // cgroups.RemovePath has handled ErrNotExist + err := m.fsMgr.Destroy() + // fsMgr.Destroy has handled ErrNotExist if err != nil { return err } @@ -317,7 +350,6 @@ func (m *unifiedManager) Destroy() error { } func (m *unifiedManager) Path(_ string) string { - _ = m.initPath() return m.path } @@ -326,7 +358,7 @@ func (m *unifiedManager) Path(_ string) string { func (m *unifiedManager) getSliceFull() (string, error) { c := m.cgroups slice := "system.slice" - if m.rootless { + if c.Rootless { slice = "user.slice" } if c.Parent != "" { @@ -337,7 +369,7 @@ func (m *unifiedManager) getSliceFull() (string, error) { } } - if m.rootless { + if c.Rootless { // managerCG is typically "/user.slice/user-${uid}.slice/user@${uid}.service". 
managerCG, err := getManagerProperty(m.dbus, "ControlGroup") if err != nil { @@ -375,58 +407,36 @@ func (m *unifiedManager) initPath() error { return nil } -func (m *unifiedManager) fsManager() (cgroups.Manager, error) { - if err := m.initPath(); err != nil { - return nil, err - } - return fs2.NewManager(m.cgroups, m.path, m.rootless) -} - func (m *unifiedManager) Freeze(state configs.FreezerState) error { - fsMgr, err := m.fsManager() - if err != nil { - return err - } - return fsMgr.Freeze(state) + return m.fsMgr.Freeze(state) } func (m *unifiedManager) GetPids() ([]int, error) { - if err := m.initPath(); err != nil { - return nil, err - } return cgroups.GetPids(m.path) } func (m *unifiedManager) GetAllPids() ([]int, error) { - if err := m.initPath(); err != nil { - return nil, err - } return cgroups.GetAllPids(m.path) } func (m *unifiedManager) GetStats() (*cgroups.Stats, error) { - fsMgr, err := m.fsManager() - if err != nil { - return nil, err - } - return fsMgr.GetStats() + return m.fsMgr.GetStats() } func (m *unifiedManager) Set(r *configs.Resources) error { + if r == nil { + return nil + } properties, err := genV2ResourcesProperties(r, m.dbus) if err != nil { return err } if err := setUnitProperties(m.dbus, getUnitName(m.cgroups), properties...); err != nil { - return errors.Wrap(err, "error while setting unit properties") + return fmt.Errorf("unable to set unit properties: %w", err) } - fsMgr, err := m.fsManager() - if err != nil { - return err - } - return fsMgr.Set(r) + return m.fsMgr.Set(r) } func (m *unifiedManager) GetPaths() map[string]string { @@ -440,11 +450,7 @@ func (m *unifiedManager) GetCgroups() (*configs.Cgroup, error) { } func (m *unifiedManager) GetFreezerState() (configs.FreezerState, error) { - fsMgr, err := m.fsManager() - if err != nil { - return configs.Undefined, err - } - return fsMgr.GetFreezerState() + return m.fsMgr.GetFreezerState() } func (m *unifiedManager) Exists() bool { @@ -452,9 +458,5 @@ func (m *unifiedManager) Exists() bool { } func (m *unifiedManager) OOMKillCount() (uint64, error) { - fsMgr, err := m.fsManager() - if err != nil { - return 0, err - } - return fsMgr.OOMKillCount() + return m.fsMgr.OOMKillCount() } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index 92606525b4d..13ebf52abe4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -1,5 +1,3 @@ -// +build linux - package cgroups import ( @@ -7,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strconv" @@ -23,11 +20,14 @@ import ( const ( CgroupProcesses = "cgroup.procs" unifiedMountpoint = "/sys/fs/cgroup" + hybridMountpoint = "/sys/fs/cgroup/unified" ) var ( isUnifiedOnce sync.Once isUnified bool + isHybridOnce sync.Once + isHybrid bool ) // IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. @@ -49,6 +49,24 @@ func IsCgroup2UnifiedMode() bool { return isUnified } +// IsCgroup2HybridMode returns whether we are running in cgroup v2 hybrid mode. 
+func IsCgroup2HybridMode() bool { + isHybridOnce.Do(func() { + var st unix.Statfs_t + err := unix.Statfs(hybridMountpoint, &st) + if err != nil { + if os.IsNotExist(err) { + // ignore the "not found" error + isHybrid = false + return + } + panic(fmt.Sprintf("cannot statfs cgroup root: %s", err)) + } + isHybrid = st.Type == unix.CGROUP2_SUPER_MAGIC + }) + return isHybrid +} + type Mount struct { Mountpoint string Root string @@ -118,8 +136,8 @@ func GetAllSubsystems() ([]string, error) { return subsystems, nil } -func readProcsFile(file string) ([]int, error) { - f, err := os.Open(file) +func readProcsFile(dir string) ([]int, error) { + f, err := OpenFile(dir, CgroupProcesses, os.O_RDONLY) if err != nil { return nil, err } @@ -210,7 +228,7 @@ func EnterPid(cgroupPaths map[string]string, pid int) error { func rmdir(path string) error { err := unix.Rmdir(path) - if err == nil || err == unix.ENOENT { + if err == nil || err == unix.ENOENT { //nolint:errorlint // unix errors are bare return nil } return &os.PathError{Op: "rmdir", Path: path, Err: err} @@ -224,7 +242,7 @@ func RemovePath(path string) error { return nil } - infos, err := ioutil.ReadDir(path) + infos, err := os.ReadDir(path) if err != nil { if os.IsNotExist(err) { err = nil @@ -284,40 +302,61 @@ func RemovePaths(paths map[string]string) (err error) { return fmt.Errorf("Failed to remove paths: %v", paths) } -func GetHugePageSize() ([]string, error) { - dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0) - if err != nil { - return nil, err - } - files, err := dir.Readdirnames(0) - dir.Close() - if err != nil { - return nil, err - } +var ( + hugePageSizes []string + initHPSOnce sync.Once +) - return getHugePageSizeFromFilenames(files) +func HugePageSizes() []string { + initHPSOnce.Do(func() { + dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0) + if err != nil { + return + } + files, err := dir.Readdirnames(0) + dir.Close() + if err != nil { + return + } + + hugePageSizes, err = getHugePageSizeFromFilenames(files) + if err != nil { + logrus.Warn("HugePageSizes: ", err) + } + }) + + return hugePageSizes } func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { pageSizes := make([]string, 0, len(fileNames)) + var warn error for _, file := range fileNames { // example: hugepages-1048576kB val := strings.TrimPrefix(file, "hugepages-") if len(val) == len(file) { - // unexpected file name: no prefix found + // Unexpected file name: no prefix found, ignore it. continue } - // The suffix is always "kB" (as of Linux 5.9) + // The suffix is always "kB" (as of Linux 5.13). If we find + // something else, produce an error but keep going. eLen := len(val) - 2 val = strings.TrimSuffix(val, "kB") if len(val) != eLen { - logrus.Warnf("GetHugePageSize: %s: invalid filename suffix (expected \"kB\")", file) + // Highly unlikely. + if warn == nil { + warn = errors.New(file + `: invalid suffix (expected "kB")`) + } continue } size, err := strconv.Atoi(val) if err != nil { - return nil, err + // Highly unlikely. + if warn == nil { + warn = fmt.Errorf("%s: %w", file, err) + } + continue } // Model after https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/hugetlb_cgroup.c?id=eff48ddeab782e35e58ccc8853f7386bbae9dec4#n574 // but in our case the size is in KB already. 
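A short sketch of how callers adapt to the utils.go API changes above: GetHugePageSize() ([]string, error) is replaced by HugePageSizes(), which caches its result and logs a warning instead of returning an error, and IsCgroup2HybridMode reports whether /sys/fs/cgroup/unified is a cgroup v2 mount. The example output is illustrative:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// Before: sizes, err := cgroups.GetHugePageSize()
	// After: no error to handle; an empty slice means none were detected.
	sizes := cgroups.HugePageSizes() // e.g. ["2MB" "1GB"] on x86_64
	fmt.Println("hugepage sizes:", sizes)

	fmt.Println("cgroup v2 unified:", cgroups.IsCgroup2UnifiedMode())
	fmt.Println("cgroup v2 hybrid: ", cgroups.IsCgroup2HybridMode())
}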
@@ -331,34 +370,12 @@ func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { pageSizes = append(pageSizes, val) } - return pageSizes, nil + return pageSizes, warn } // GetPids returns all pids, that were added to cgroup at path. func GetPids(dir string) ([]int, error) { - return readProcsFile(filepath.Join(dir, CgroupProcesses)) -} - -// GetAllPids returns all pids, that were added to cgroup at path and to all its -// subcgroups. -func GetAllPids(path string) ([]int, error) { - var pids []int - // collect pids from all sub-cgroups - err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error { - if iErr != nil { - return iErr - } - if info.IsDir() || info.Name() != CgroupProcesses { - return nil - } - cPids, err := readProcsFile(p) - if err != nil { - return err - } - pids = append(pids, cPids...) - return nil - }) - return pids, err + return readProcsFile(dir) } // WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file @@ -376,7 +393,7 @@ func WriteCgroupProc(dir string, pid int) error { file, err := OpenFile(dir, CgroupProcesses, os.O_WRONLY) if err != nil { - return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err) + return fmt.Errorf("failed to write %v: %w", pid, err) } defer file.Close() @@ -393,7 +410,7 @@ func WriteCgroupProc(dir string, pid int) error { continue } - return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err) + return fmt.Errorf("failed to write %v: %w", pid, err) } return err } @@ -446,5 +463,5 @@ func ConvertBlkIOToIOWeightValue(blkIoWeight uint16) uint64 { if blkIoWeight == 0 { return 0 } - return uint64(1 + (uint64(blkIoWeight)-10)*9999/990) + return 1 + (uint64(blkIoWeight)-10)*9999/990 } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go index 95ec9dff028..47c75f22b42 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go @@ -46,11 +46,8 @@ func NewNotFoundError(sub string) error { } func IsNotFound(err error) bool { - if err == nil { - return false - } - _, ok := err.(*NotFoundError) - return ok + var nfErr *NotFoundError + return errors.As(err, &nfErr) } func tryDefaultPath(cgroupPath, subsystem string) string { @@ -116,6 +113,11 @@ func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { return "", errUnified } + // If subsystem is empty, we look for the cgroupv2 hybrid path. + if len(subsystem) == 0 { + return hybridMountpoint, nil + } + // Avoid parsing mountinfo by trying the default path first, if possible. if path := tryDefaultPath(cgroupPath, subsystem); path != "" { return path, nil @@ -154,7 +156,7 @@ func findCgroupMountpointAndRootFromMI(mounts []*mountinfo.Info, cgroupPath, sub func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) { if len(m.Subsystems) == 0 { - return "", fmt.Errorf("no subsystem for mount") + return "", errors.New("no subsystem for mount") } return getControllerPath(m.Subsystems[0], cgroups) @@ -226,6 +228,11 @@ func GetOwnCgroupPath(subsystem string) (string, error) { return "", err } + // If subsystem is empty, we look for the cgroupv2 hybrid path. 
+ if len(subsystem) == 0 { + return hybridMountpoint, nil + } + return getCgroupPathHelper(subsystem, cgroup) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go index 5ea9d940cef..2d4a8987109 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -28,17 +28,26 @@ type Cgroup struct { // ScopePrefix describes prefix for the scope name ScopePrefix string `json:"scope_prefix"` - // Paths represent the absolute cgroups paths to join. - // This takes precedence over Path. - Paths map[string]string - // Resources contains various cgroups settings to apply *Resources + // Systemd tells if systemd should be used to manage cgroups. + Systemd bool + // SystemdProps are any additional properties for systemd, // derived from org.systemd.property.xxx annotations. // Ignored unless systemd is used for managing cgroups. SystemdProps []systemdDbus.Property `json:"-"` + + // Rootless tells if rootless cgroups should be used. + Rootless bool + + // The host UID that should own the cgroup, or nil to accept + // the default ownership. This should only be set when the + // cgroupfs is to be mounted read/write. + // Not all cgroup manager implementations support changing + // the ownership. + OwnerUID *int `json:"owner_uid,omitempty"` } type Resources struct { @@ -117,6 +126,9 @@ type Resources struct { // Set class identifier for container's network packets NetClsClassid uint32 `json:"net_cls_classid_u"` + // Rdma resource restriction configuration + Rdma map[string]LinuxRdma `json:"rdma"` + // Used on cgroups v2: // CpuWeight sets a proportional bandwidth limit. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go index 2a519f582d2..7e383020f4c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 4281593f0b7..c1b4a0041c2 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -7,10 +7,10 @@ import ( "os/exec" "time" + "github.com/sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type Rlimit struct { @@ -31,10 +31,12 @@ type IDMap struct { // for syscalls. Additional architectures can be added by specifying them in // Architectures. 
type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` - DefaultErrnoRet *uint `json:"default_errno_ret"` + DefaultAction Action `json:"default_action"` + Architectures []string `json:"architectures"` + Syscalls []*Syscall `json:"syscalls"` + DefaultErrnoRet *uint `json:"default_errno_ret"` + ListenerPath string `json:"listener_path,omitempty"` + ListenerMetadata string `json:"listener_metadata,omitempty"` } // Action is taken upon rule match in Seccomp @@ -47,6 +49,9 @@ const ( Allow Trace Log + Notify + KillThread + KillProcess ) // Operator is a comparison operator to be used when matching syscall arguments in Seccomp @@ -246,6 +251,19 @@ const ( Poststop HookName = "poststop" ) +// KnownHookNames returns the known hook names. +// Used by `runc features`. +func KnownHookNames() []string { + return []string{ + string(Prestart), // deprecated + string(CreateRuntime), + string(CreateContainer), + string(StartContainer), + string(Poststart), + string(Poststop), + } +} + type Capabilities struct { // Bounding is the set of capabilities checked by the kernel. Bounding []string @@ -262,7 +280,7 @@ type Capabilities struct { func (hooks HookList) RunHooks(state *specs.State) error { for i, h := range hooks { if err := h.Run(state); err != nil { - return errors.Wrapf(err, "Running hook #%d:", i) + return fmt.Errorf("error running hook #%d: %w", i, err) } } @@ -375,7 +393,7 @@ func (c Command) Run(s *specs.State) error { go func() { err := cmd.Wait() if err != nil { - err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) + err = fmt.Errorf("error running hook: %w, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) } errC <- err }() diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go index 07da1080454..8c02848b70a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go @@ -1,17 +1,24 @@ package configs -import "fmt" +import "errors" + +var ( + errNoUIDMap = errors.New("User namespaces enabled, but no uid mappings found.") + errNoUserMap = errors.New("User namespaces enabled, but no user mapping found.") + errNoGIDMap = errors.New("User namespaces enabled, but no gid mappings found.") + errNoGroupMap = errors.New("User namespaces enabled, but no group mapping found.") +) // HostUID gets the translated uid for the process on host which could be // different when user namespaces are enabled. 
func (c Config) HostUID(containerId int) (int, error) { if c.Namespaces.Contains(NEWUSER) { if c.UidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no uid mappings found.") + return -1, errNoUIDMap } id, found := c.hostIDFromMapping(containerId, c.UidMappings) if !found { - return -1, fmt.Errorf("User namespaces enabled, but no user mapping found.") + return -1, errNoUserMap } return id, nil } @@ -30,11 +37,11 @@ func (c Config) HostRootUID() (int, error) { func (c Config) HostGID(containerId int) (int, error) { if c.Namespaces.Contains(NEWUSER) { if c.GidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.") + return -1, errNoGIDMap } id, found := c.hostIDFromMapping(containerId, c.GidMappings) if !found { - return -1, fmt.Errorf("User namespaces enabled, but no group mapping found.") + return -1, errNoGroupMap } return id, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go index 93bf41c8def..bce829e290a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go index 57e9f037d97..f8d951ab8b9 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go @@ -1,6 +1,9 @@ package configs type IntelRdt struct { + // The identity for RDT Class of Service + ClosID string `json:"closID,omitempty"` + // The schema for L3 cache id and capacity bitmask (CBM) // Format: "L3:=;=;..." L3CacheSchema string `json:"l3_cache_schema,omitempty"` diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go index a75ff10ec9d..784c6182051 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go @@ -1,5 +1,7 @@ package configs +import "golang.org/x/sys/unix" + const ( // EXT_COPYUP is a directive to copy up the contents of a directory when // a tmpfs is mounted over it. @@ -28,6 +30,9 @@ type Mount struct { // Relabel source if set, "z" indicates shared, "Z" indicates unshared. Relabel string `json:"relabel"` + // RecAttr represents mount properties to be applied recursively (AT_RECURSIVE), see mount_setattr(2). + RecAttr *unix.MountAttr `json:"rec_attr"` + // Extensions are additional flags that are specific to runc. Extensions int `json:"extensions"` @@ -37,3 +42,7 @@ type Mount struct { // Optional Command to be run after Source is mounted. 
PostmountCmds []Command `json:"postmount_cmds"` } + +func (m *Mount) IsBind() bool { + return m.Flags&unix.MS_BIND != 0 +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go index 2dc7adfc966..0516dba8d09 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go index 5d9a5c81f3f..fbb0d49071e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && !windows // +build !linux,!windows package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go index cc76e2f586a..946db30a549 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package configs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go new file mode 100644 index 00000000000..c69f2c8024b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go @@ -0,0 +1,9 @@ +package configs + +// LinuxRdma for Linux cgroup 'rdma' resource management (Linux 4.11) +type LinuxRdma struct { + // Maximum number of HCA handles that can be opened. Default is "no limit". + HcaHandles *uint32 `json:"hca_handles,omitempty"` + // Maximum number of HCA objects that can be created. Default is "no limit". + HcaObjects *uint32 `json:"hca_objects,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go index 6d5b3d09df3..7d8e9fc3104 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go @@ -1,10 +1,10 @@ +//go:build !windows // +build !windows package devices import ( "errors" - "io/ioutil" "os" "path/filepath" @@ -16,8 +16,8 @@ var ErrNotADevice = errors.New("not a device node") // Testing dependencies var ( - unixLstat = unix.Lstat - ioutilReadDir = ioutil.ReadDir + unixLstat = unix.Lstat + osReadDir = os.ReadDir ) func mkDev(d *Rule) (uint64, error) { @@ -40,7 +40,7 @@ func DeviceFromPath(path, permissions string) (*Device, error) { var ( devType Type mode = stat.Mode - devNumber = uint64(stat.Rdev) + devNumber = uint64(stat.Rdev) //nolint:unconvert // Rdev is uint32 on e.g. MIPS. major = unix.Major(devNumber) minor = unix.Minor(devNumber) ) @@ -76,7 +76,7 @@ func HostDevices() ([]*Device, error) { // GetDevices recursively traverses a directory specified by path // and returns all devices found there. 
func GetDevices(path string) ([]*Device, error) { - files, err := ioutilReadDir(path) + files, err := osReadDir(path) if err != nil { return nil, err } @@ -103,7 +103,7 @@ func GetDevices(path string) ([]*Device, error) { } device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm") if err != nil { - if err == ErrNotADevice { + if errors.Is(err, ErrNotADevice) { continue } if os.IsNotExist(err) { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go index 967717a1b1c..f95c1409fce 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -1,3 +1,4 @@ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package user diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index cc7a106be5d..2473c5eaddc 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -120,7 +120,7 @@ func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") + return nil, errors.New("nil source for passwd-formatted data") } var ( @@ -178,7 +178,7 @@ func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") + return nil, errors.New("nil source for group-formatted data") } rd := bufio.NewReader(r) out := []Group{} @@ -339,7 +339,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if userArg == "" { userArg = strconv.Itoa(user.Uid) } - return nil, fmt.Errorf("unable to find user %s: %v", userArg, err) + return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) } var matchedUserName string @@ -355,7 +355,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if uidErr != nil { // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries) + return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) } user.Uid = uidArg @@ -390,7 +390,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( return g.Name == groupArg }) if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err) + return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) } // Only start modifying user.Gid if it is in explicit form. @@ -404,7 +404,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if gidErr != nil { // Not numeric. 
- return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries) + return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) } user.Gid = gidArg @@ -445,7 +445,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err return false }) if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) + return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) } } @@ -468,7 +468,8 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err if !found { gid, err := strconv.ParseInt(ag, 10, 64) if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) + // Not a numeric ID either. + return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) } // Ensure gid is inside gid range. if gid < minID || gid > maxID { @@ -521,7 +522,7 @@ func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { if r == nil { - return nil, fmt.Errorf("nil source for subid-formatted data") + return nil, errors.New("nil source for subid-formatted data") } var ( @@ -574,7 +575,7 @@ func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { if r == nil { - return nil, fmt.Errorf("nil source for idmap-formatted data") + return nil, errors.New("nil source for idmap-formatted data") } var ( diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go index 8c9bb5df39c..e018eae614e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package user diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go index 529f8eaea2f..1e00ab8b505 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_fuzzer.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package userns diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go index f45bb0c3156..f35c13a10e0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package userns diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go index c8a9364d54d..7ef9da21fd2 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go @@ -1,5 +1,3 @@ -// +build linux - package utils /* @@ -88,6 +86,11 @@ func SendFd(socket *os.File, name string, fd uintptr) error { if len(name) >= MaxNameLen { return fmt.Errorf("sendfd: filename too long: %s", name) } - oob := unix.UnixRights(int(fd)) - return unix.Sendmsg(int(socket.Fd()), []byte(name), oob, nil, 0) + return SendFds(socket, 
[]byte(name), int(fd)) +} + +// SendFds sends a list of files descriptor and msg over the given AF_UNIX socket. +func SendFds(socket *os.File, msg []byte, fds ...int) error { + oob := unix.UnixRights(fds...) + return unix.Sendmsg(int(socket.Fd()), msg, oob, nil, 0) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go index cd78f23e1bd..6b9fc343522 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go @@ -11,7 +11,7 @@ import ( "strings" "unsafe" - "github.com/cyphar/filepath-securejoin" + securejoin "github.com/cyphar/filepath-securejoin" "golang.org/x/sys/unix" ) @@ -33,16 +33,6 @@ func init() { } } -// ResolveRootfs ensures that the current working directory is -// not a symlink and returns the absolute path to the rootfs -func ResolveRootfs(uncleanRootfs string) (string, error) { - rootfs, err := filepath.Abs(uncleanRootfs) - if err != nil { - return "", err - } - return filepath.EvalSymlinks(rootfs) -} - // ExitStatus returns the correct exit status for a process based on if it // was signaled or exited cleanly func ExitStatus(status unix.WaitStatus) int { @@ -120,7 +110,7 @@ func WithProcfd(root, unsafePath string, fn func(procfd string) error) error { unsafePath = stripRoot(root, unsafePath) path, err := securejoin.SecureJoin(root, unsafePath) if err != nil { - return fmt.Errorf("resolving path inside rootfs failed: %v", err) + return fmt.Errorf("resolving path inside rootfs failed: %w", err) } // Open the target path. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go index 1576f2d4ab6..220d0b43937 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package utils @@ -14,7 +15,7 @@ import ( func EnsureProcHandle(fh *os.File) error { var buf unix.Statfs_t if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil { - return fmt.Errorf("ensure %s is on procfs: %v", fh.Name(), err) + return fmt.Errorf("ensure %s is on procfs: %w", fh.Name(), err) } if buf.Type != unix.PROC_SUPER_MAGIC { return fmt.Errorf("%s is not on procfs", fh.Name()) @@ -52,7 +53,7 @@ func CloseExecFrom(minFd int) error { // Intentionally ignore errors from unix.CloseOnExec -- the cases where // this might fail are basically file descriptors that have already // been closed (including and especially the one that was created when - // ioutil.ReadDir did the "opendir" syscall). + // os.ReadDir did the "opendir" syscall). unix.CloseOnExec(fd) } return nil diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index cad467507a5..5a59d151f67 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -61,16 +61,30 @@ func ClassIndex(class string) (int, error) { return classIndex(class) } -// SetFileLabel sets the SELinux label for this path or returns an error. +// SetFileLabel sets the SELinux label for this path, following symlinks, +// or returns an error. 
func SetFileLabel(fpath string, label string) error { return setFileLabel(fpath, label) } -// FileLabel returns the SELinux label for this path or returns an error. +// LsetFileLabel sets the SELinux label for this path, not following symlinks, +// or returns an error. +func LsetFileLabel(fpath string, label string) error { + return lSetFileLabel(fpath, label) +} + +// FileLabel returns the SELinux label for this path, following symlinks, +// or returns an error. func FileLabel(fpath string) (string, error) { return fileLabel(fpath) } +// LfileLabel returns the SELinux label for this path, not following symlinks, +// or returns an error. +func LfileLabel(fpath string) (string, error) { + return lFileLabel(fpath) +} + // SetFSCreateLabel tells the kernel what label to use for all file system objects // created by this task. // Set the label to an empty string to return to the default label. Calls to SetFSCreateLabel diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index b045843ad6e..ee602ab96dd 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -316,8 +316,9 @@ func classIndex(class string) (int, error) { return index, nil } -// setFileLabel sets the SELinux label for this path or returns an error. -func setFileLabel(fpath string, label string) error { +// lSetFileLabel sets the SELinux label for this path, not following symlinks, +// or returns an error. +func lSetFileLabel(fpath string, label string) error { if fpath == "" { return ErrEmptyPath } @@ -334,12 +335,50 @@ func setFileLabel(fpath string, label string) error { return nil } -// fileLabel returns the SELinux label for this path or returns an error. +// setFileLabel sets the SELinux label for this path, following symlinks, +// or returns an error. +func setFileLabel(fpath string, label string) error { + if fpath == "" { + return ErrEmptyPath + } + for { + err := unix.Setxattr(fpath, xattrNameSelinux, []byte(label), 0) + if err == nil { + break + } + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return &os.PathError{Op: "setxattr", Path: fpath, Err: err} + } + } + + return nil +} + +// fileLabel returns the SELinux label for this path, following symlinks, +// or returns an error. func fileLabel(fpath string) (string, error) { if fpath == "" { return "", ErrEmptyPath } + label, err := getxattr(fpath, xattrNameSelinux) + if err != nil { + return "", &os.PathError{Op: "getxattr", Path: fpath, Err: err} + } + // Trim the NUL byte at the end of the byte buffer, if present. + if len(label) > 0 && label[len(label)-1] == '\x00' { + label = label[:len(label)-1] + } + return string(label), nil +} + +// lFileLabel returns the SELinux label for this path, not following symlinks, +// or returns an error. 
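The new split between symlink-following and non-following label helpers above maps directly onto setxattr/getxattr versus their l-prefixed counterparts. A minimal, illustrative sketch of calling the exported wrappers added here (the paths and the label value are made up for illustration; on a host without SELinux these calls simply return an error or an empty label):

```go
package main

import (
	"fmt"

	selinux "github.com/opencontainers/selinux/go-selinux"
)

func main() {
	label := "system_u:object_r:container_file_t:s0" // example label, not prescriptive

	// SetFileLabel follows a symlink and labels its target...
	if err := selinux.SetFileLabel("/tmp/data", label); err != nil {
		fmt.Println("labeling target:", err)
	}
	// ...while LsetFileLabel labels the link inode itself.
	if err := selinux.LsetFileLabel("/tmp/data-link", label); err != nil {
		fmt.Println("labeling link:", err)
	}

	followed, _ := selinux.FileLabel("/tmp/data-link")   // label of the link target
	linkItself, _ := selinux.LfileLabel("/tmp/data-link") // label of the link itself
	fmt.Println(followed, linkItself)
}
```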
+func lFileLabel(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + label, err := lgetxattr(fpath, xattrNameSelinux) if err != nil { return "", &os.PathError{Op: "lgetxattr", Path: fpath, Err: err} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 42657759c38..78743b020c9 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -17,10 +17,18 @@ func setFileLabel(fpath string, label string) error { return nil } +func lSetFileLabel(fpath string, label string) error { + return nil +} + func fileLabel(fpath string) (string, error) { return "", nil } +func lFileLabel(fpath string) (string, error) { + return "", nil +} + func setFSCreateLabel(label string) error { return nil } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go index c6b0a7f2655..9e473ca168f 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go @@ -36,3 +36,36 @@ func doLgetxattr(path, attr string, dest []byte) (int, error) { } } } + +// getxattr returns a []byte slice containing the value of +// an extended attribute attr set for path. +func getxattr(path, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := dogetxattr(path, attr, dest) + for errno == unix.ERANGE { //nolint:errorlint // unix errors are bare + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = dogetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + + dest = make([]byte, sz) + sz, errno = dogetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// dogetxattr is a wrapper that retries on EINTR +func dogetxattr(path, attr string, dest []byte) (int, error) { + for { + sz, err := unix.Getxattr(path, attr, dest) + if err != unix.EINTR { //nolint:errorlint // unix errors are bare + return sz, err + } + } +} diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml index 97b530b4fd7..ff6afee5a0f 100644 --- a/vendor/github.com/openshift/imagebuilder/.travis.yml +++ b/vendor/github.com/openshift/imagebuilder/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - "1.9" - - "1.10" + - "1.16" + - "1.17" install: diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index df52699046b..71dc41ea560 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -44,6 +44,7 @@ type Run struct { type Executor interface { Preserve(path string) error EnsureContainerPath(path string) error + EnsureContainerPathAs(path, user string, mode *os.FileMode) error Copy(excludes []string, copies ...Copy) error Run(run Run, config docker.Config) error UnrecognizedInstruction(step *Step) error @@ -61,6 +62,15 @@ func (logExecutor) EnsureContainerPath(path string) error { return nil } +func (logExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + if mode != nil { + log.Printf("ENSURE %s AS %q with MODE=%q", path, user, *mode) + } else { + log.Printf("ENSURE %s AS %q", path, user) + } + return 
nil +} + func (logExecutor) Copy(excludes []string, copies ...Copy) error { for _, c := range copies { log.Printf("COPY %v -> %s (from:%s download:%t), chown: %s, chmod %s", c.Src, c.Dest, c.From, c.Download, c.Chown, c.Chmod) @@ -88,6 +98,10 @@ func (noopExecutor) EnsureContainerPath(path string) error { return nil } +func (noopExecutor) EnsureContainerPathAs(path, user string, mode *os.FileMode) error { + return nil +} + func (noopExecutor) Copy(excludes []string, copies ...Copy) error { return nil } @@ -303,6 +317,9 @@ type Builder struct { PendingCopies []Copy Warnings []string + // Raw platform string specified with `FROM --platform` of the stage + // It's up to the implementation or client to parse and use this field + Platform string } func NewBuilder(args map[string]string) *Builder { @@ -375,7 +392,7 @@ func (b *Builder) Run(step *Step, exec Executor, noRunsRemaining bool) error { } if len(b.RunConfig.WorkingDir) > 0 { - if err := exec.EnsureContainerPath(b.RunConfig.WorkingDir); err != nil { + if err := exec.EnsureContainerPathAs(b.RunConfig.WorkingDir, b.RunConfig.User, nil); err != nil { return err } } @@ -470,7 +487,7 @@ func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error { b.RunConfig.Env = nil // Check to see if we have a default PATH, note that windows won't - // have one as its set by HCS + // have one as it's set by HCS if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") { b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv) } diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index 0d82136e704..fd05f031c08 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -234,6 +234,20 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri return fmt.Errorf("Windows does not support FROM scratch") } } + userArgs := mergeEnv(envMapAsSlice(b.Args), b.Env) + for _, a := range flagArgs { + arg, err := ProcessWord(a, userArgs) + if err != nil { + return err + } + switch { + case strings.HasPrefix(arg, "--platform="): + platformString := strings.TrimPrefix(arg, "--platform=") + b.Platform = platformString + default: + return fmt.Errorf("FROM only supports the --platform flag") + } + } b.RunConfig.Image = name // TODO: handle onbuild return nil @@ -573,65 +587,63 @@ var targetArgs = []string{"TARGETOS", "TARGETARCH", "TARGETVARIANT"} // to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. func arg(b *Builder, args []string, attributes map[string]bool, flagArgs []string, original string) error { - if len(args) != 1 { - return fmt.Errorf("ARG requires exactly one argument definition") - } - var ( name string value string hasDefault bool ) - arg := args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. 
- if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - name = parts[0] - value = parts[1] - hasDefault = true - if name == "TARGETPLATFORM" { - p, err := platforms.Parse(value) - if err != nil { - return fmt.Errorf("error parsing TARGETPLATFORM argument") - } - for _, val := range targetArgs { - b.AllowedArgs[val] = true - } - b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture - b.Args["TARGETOS"] = p.OS - b.Args["TARGETARCH"] = p.Architecture - b.Args["TARGETVARIANT"] = p.Variant - if p.Variant != "" { - b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant + for _, argument := range args { + arg := argument + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + name = parts[0] + value = parts[1] + hasDefault = true + if name == "TARGETPLATFORM" { + p, err := platforms.Parse(value) + if err != nil { + return fmt.Errorf("error parsing TARGETPLATFORM argument") + } + for _, val := range targetArgs { + b.AllowedArgs[val] = true + } + b.Args["TARGETPLATFORM"] = p.OS + "/" + p.Architecture + b.Args["TARGETOS"] = p.OS + b.Args["TARGETARCH"] = p.Architecture + b.Args["TARGETVARIANT"] = p.Variant + if p.Variant != "" { + b.Args["TARGETPLATFORM"] = b.Args["TARGETPLATFORM"] + "/" + p.Variant + } } + } else if val, ok := builtinBuildArgs[arg]; ok { + name = arg + value = val + hasDefault = true + } else { + name = arg + hasDefault = false } - } else if val, ok := builtinBuildArgs[arg]; ok { - name = arg - value = val - hasDefault = true - } else { - name = arg - hasDefault = false - } - // add the arg to allowed list of build-time args from this step on. - b.AllowedArgs[name] = true + // add the arg to allowed list of build-time args from this step on. + b.AllowedArgs[name] = true - // If there is still no default value, a value can be assigned from the heading args - if val, ok := b.HeadingArgs[name]; ok && !hasDefault { - b.Args[name] = val - } + // If there is still no default value, a value can be assigned from the heading args + if val, ok := b.HeadingArgs[name]; ok && !hasDefault { + b.Args[name] = val + } - // If there is a default value associated with this arg then add it to the - // b.buildArgs, later default values for the same arg override earlier ones. - // The args passed to builder (UserArgs) override the default value of 'arg' - // Don't add them here as they were already set in NewBuilder. - if _, ok := b.UserArgs[name]; !ok && hasDefault { - b.Args[name] = value + // If there is a default value associated with this arg then add it to the + // b.buildArgs, later default values for the same arg override earlier ones. + // The args passed to builder (UserArgs) override the default value of 'arg' + // Don't add them here as they were already set in NewBuilder. 
+ if _, ok := b.UserArgs[name]; !ok && hasDefault { + b.Args[name] = value + } } return nil diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec index 79d16ec398d..6a88a4fc4b0 100644 --- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec +++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec @@ -12,7 +12,7 @@ # %global golang_version 1.8.1 -%{!?version: %global version 1.2.2-dev} +%{!?version: %global version 1.2.3} %{!?release: %global release 1} %global package_name imagebuilder %global product_name Container Image Builder diff --git a/vendor/github.com/openshift/imagebuilder/internals.go b/vendor/github.com/openshift/imagebuilder/internals.go index f33dc70bbf7..d4b95390381 100644 --- a/vendor/github.com/openshift/imagebuilder/internals.go +++ b/vendor/github.com/openshift/imagebuilder/internals.go @@ -52,7 +52,7 @@ func hasSlash(input string) bool { // makeAbsolute ensures that the provided path is absolute. func makeAbsolute(dest, workingDir string) string { - // Twiddle the destination when its a relative path - meaning, make it + // Twiddle the destination when it's a relative path - meaning, make it // relative to the WORKINGDIR if dest == "." { if !hasSlash(workingDir) { diff --git a/vendor/github.com/openshift/imagebuilder/vendor.conf b/vendor/github.com/openshift/imagebuilder/vendor.conf deleted file mode 100644 index 8074ce80a12..00000000000 --- a/vendor/github.com/openshift/imagebuilder/vendor.conf +++ /dev/null @@ -1,25 +0,0 @@ -github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 -github.com/containerd/containerd v1.3.0 -github.com/containers/storage v1.2 -github.com/docker/docker b68221c37ee597950364788204546f9c9d0e46a1 -github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e -github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e -github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git -github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080 -github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e -github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8 -github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 -github.com/opencontainers/go-digest ac19fd6e7483ff933754af248d80be865e543d22 -github.com/opencontainers/image-spec 243ea084a44451d27322fed02b682d99e2af3ba9 -github.com/opencontainers/runc 923a8f8a9a07aceada5fc48c4d37e905d9b019b5 -github.com/pkg/errors 27936f6d90f9c8e1145f11ed52ffffbfdb9e0af7 -github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac -github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb -golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 -golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 -golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba -k8s.io/klog 8e90cee79f823779174776412c13478955131846 -google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 -github.com/golang/protobuf v1.2.0 -google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 - diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go index 30572ea87ff..6a6acfd9ab3 100644 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go +++ b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go @@ -56,6 +56,9 @@ func (v *GVariant) native() *C.GVariant { } 
func (v *GVariant) Ptr() unsafe.Pointer { + if v == nil { + return nil + } return v.ptr } diff --git a/vendor/github.com/mtrmac/gpgme/.appveyor.yml b/vendor/github.com/proglottis/gpgme/.appveyor.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.appveyor.yml rename to vendor/github.com/proglottis/gpgme/.appveyor.yml diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.gitignore rename to vendor/github.com/proglottis/gpgme/.gitignore diff --git a/vendor/github.com/mtrmac/gpgme/.travis.yml b/vendor/github.com/proglottis/gpgme/.travis.yml similarity index 100% rename from vendor/github.com/mtrmac/gpgme/.travis.yml rename to vendor/github.com/proglottis/gpgme/.travis.yml diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/proglottis/gpgme/LICENSE similarity index 100% rename from vendor/github.com/mtrmac/gpgme/LICENSE rename to vendor/github.com/proglottis/gpgme/LICENSE diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/proglottis/gpgme/README.md similarity index 100% rename from vendor/github.com/mtrmac/gpgme/README.md rename to vendor/github.com/proglottis/gpgme/README.md diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/callbacks.go rename to vendor/github.com/proglottis/gpgme/callbacks.go diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/data.go rename to vendor/github.com/proglottis/gpgme/data.go diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c similarity index 100% rename from vendor/github.com/mtrmac/gpgme/go_gpgme.c rename to vendor/github.com/proglottis/gpgme/go_gpgme.c diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h similarity index 92% rename from vendor/github.com/mtrmac/gpgme/go_gpgme.h rename to vendor/github.com/proglottis/gpgme/go_gpgme.h index d4826ab368e..eb3a4ba88b1 100644 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h +++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h @@ -6,11 +6,6 @@ #include -/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */ -#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402 -typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */ -#endif - extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go similarity index 96% rename from vendor/github.com/mtrmac/gpgme/gpgme.go rename to vendor/github.com/proglottis/gpgme/gpgme.go index c19b9aebc5c..9833057a663 100644 --- a/vendor/github.com/mtrmac/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -53,13 +53,13 @@ const ( type PinEntryMode int -// const ( // Unavailable in 1.3.2 -// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT -// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK -// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL -// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR -// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK -// ) +const ( + 
PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT + PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK + PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL + PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR + PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK +) type EncryptFlag uint @@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode { return res } -// Unavailable in 1.3.2: -// func (c *Context) SetPinEntryMode(m PinEntryMode) error { -// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) -// runtime.KeepAlive(c) -// return err -// } +func (c *Context) SetPinEntryMode(m PinEntryMode) error { + err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) + runtime.KeepAlive(c) + return err +} -// Unavailable in 1.3.2: -// func (c *Context) PinEntryMode() PinEntryMode { -// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) -// runtime.KeepAlive(c) -// return res -// } +func (c *Context) PinEntryMode() PinEntryMode { + res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) + runtime.KeepAlive(c) + return res +} func (c *Context) SetCallback(callback Callback) error { var err error diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info.go diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go similarity index 100% rename from vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go rename to vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 83c49b66a81..861b4d21cac 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -49,7 +49,10 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // http.RoundTripper to observe the request result with the provided CounterVec. // The CounterVec must have zero, one, or two non-const non-curried labels. For // those, the only allowed label names are "code" and "method". The function -// panics otherwise. Partitioning of the CounterVec happens by HTTP status code +// panics otherwise. For the "method" label a predefined default label value set +// is used to filter given values. Values besides predefined values will count +// as `unknown` method.`WithExtraMethods` can be used to add more +// methods to the set. Partitioning of the CounterVec happens by HTTP status code // and/or HTTP method if the respective instance label names are present in the // CounterVec. For unpartitioned counting, use a CounterVec with zero labels. // @@ -57,13 +60,18 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // is not incremented. // // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { + rtOpts := &option{} + for _, o := range opts { + o(rtOpts) + } + code, method := checkLabels(counter) return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() } return resp, err }) @@ -73,7 +81,10 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // http.RoundTripper to observe the request duration with the provided // ObserverVec. The ObserverVec must have zero, one, or two non-const // non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. The Observe method of the Observer +// "method". The function panics otherwise. For the "method" label a predefined +// default label value set is used to filter given values. Values besides +// predefined values will count as `unknown` method. `WithExtraMethods` +// can be used to add more methods to the set. The Observe method of the Observer // in the ObserverVec is called with the request duration in // seconds. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For @@ -85,14 +96,19 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { + rtOpts := &option{} + for _, o := range opts { + o(rtOpts) + } + code, method := checkLabels(obs) return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds()) } return resp, err }) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index ab037db8619..a23f0edc6f8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -45,7 +45,10 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // http.Handler to observe the request duration with the provided ObserverVec. // The ObserverVec must have valid metric and label names and must have zero, // one, or two non-const non-curried labels. For those, the only allowed label -// names are "code" and "method". The function panics otherwise. The Observe +// names are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. 
+//`WithExtraMethods` can be used to add more methods to the set. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For @@ -58,7 +61,12 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + mwOpts := &option{} + for _, o := range opts { + o(mwOpts) + } + code, method := checkLabels(obs) if code { @@ -67,14 +75,14 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) }) } @@ -82,7 +90,10 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht // to observe the request result with the provided CounterVec. The CounterVec // must have valid metric and label names and must have zero, one, or two // non-const non-curried labels. For those, the only allowed label names are -// "code" and "method". The function panics otherwise. Partitioning of the +// "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the // CounterVec happens by HTTP status code and/or HTTP method if the respective // instance label names are present in the CounterVec. For unpartitioned // counting, use a CounterVec with zero labels. @@ -92,20 +103,25 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht // If the wrapped Handler panics, the Counter is not incremented. // // See the example for InstrumentHandlerDuration for example usage. 
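A rough usage sketch of the variadic options introduced in this change (the metric name, handler, and the extra PROPFIND method are assumptions for illustration; `WithExtraMethods` and the `opts ...Option` parameter are the additions shown in this diff):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "api_requests_total", Help: "Total API requests."},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(counter)

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Without the option, PROPFIND requests would be counted under method="unknown";
	// with it, they are labelled "propfind".
	http.Handle("/", promhttp.InstrumentHandlerCounter(counter, handler,
		promhttp.WithExtraMethods("PROPFIND")))
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}
```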
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { + mwOpts := &option{} + for _, o := range opts { + o(mwOpts) + } + code, method := checkLabels(counter) if code { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status())).Inc() + counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc() }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0)).Inc() + counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc() }) } @@ -114,7 +130,10 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) // until the response headers are written. The ObserverVec must have valid // metric and label names and must have zero, one, or two non-const non-curried // labels. For those, the only allowed label names are "code" and "method". The -// function panics otherwise. The Observe method of the Observer in the +// function panics otherwise. For the "method" label a predefined default label +// value set is used to filter given values. Values besides predefined values +// will count as `unknown` method.`WithExtraMethods` can be used to add more +// methods to the set. The Observe method of the Observer in the // ObserverVec is called with the request duration in seconds. Partitioning // happens by HTTP status code and/or HTTP method if the respective instance // label names are present in the ObserverVec. For unpartitioned observations, @@ -128,13 +147,18 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) // if used with Go1.9+. // // See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + mwOpts := &option{} + for _, o := range opts { + o(mwOpts) + } + code, method := checkLabels(obs) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) }) next.ServeHTTP(d, r) }) @@ -144,8 +168,11 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // http.Handler to observe the request size with the provided ObserverVec. The // ObserverVec must have valid metric and label names and must have zero, one, // or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the request size in +// are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. 
The Observe +// method of the Observer in the ObserverVec is called with the request size in // bytes. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that @@ -156,7 +183,12 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // If the wrapped Handler panics, no values are reported. // // See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + mwOpts := &option{} + for _, o := range opts { + o(mwOpts) + } + code, method := checkLabels(obs) if code { @@ -164,14 +196,14 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size)) }) } @@ -179,8 +211,11 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) // http.Handler to observe the response size with the provided ObserverVec. The // ObserverVec must have valid metric and label names and must have zero, one, // or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the response size in +// are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the response size in // bytes. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that @@ -191,12 +226,18 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) // If the wrapped Handler panics, no values are reported. // // See the example for InstrumentHandlerDuration for example usage. 
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { + mwOpts := &option{} + for _, o := range opts { + o(mwOpts) + } + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written())) }) } @@ -290,7 +331,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { // unnecessary allocations on each request. var emptyLabels = prometheus.Labels{} -func labels(code, method bool, reqMethod string, status int) prometheus.Labels { +func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { if !(code || method) { return emptyLabels } @@ -300,7 +341,7 @@ func labels(code, method bool, reqMethod string, status int) prometheus.Labels { labels["code"] = sanitizeCode(status) } if method { - labels["method"] = sanitizeMethod(reqMethod) + labels["method"] = sanitizeMethod(reqMethod, extraMethods...) } return labels @@ -330,7 +371,12 @@ func computeApproximateRequestSize(r *http.Request) int { return s } -func sanitizeMethod(m string) string { +// If the wrapped http.Handler has a known method, it will be sanitized and returned. +// Otherwise, "unknown" will be returned. The known method list can be extended +// as needed by using extraMethods parameter. +func sanitizeMethod(m string, extraMethods ...string) string { + // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for + // the methods chosen as default. switch m { case "GET", "get": return "get" @@ -348,15 +394,25 @@ func sanitizeMethod(m string) string { return "options" case "NOTIFY", "notify": return "notify" + case "TRACE", "trace": + return "trace" + case "PATCH", "patch": + return "patch" default: - return strings.ToLower(m) + for _, method := range extraMethods { + if strings.EqualFold(m, method) { + return strings.ToLower(m) + } + } + return "unknown" } } // If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, santizeCode will return 200, for consistency with behavior in +// currently 0, sanitizeCode will return 200, for consistency with behavior in // the stdlib. func sanitizeCode(s int) string { + // See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml switch s { case 100: return "100" @@ -453,6 +509,9 @@ func sanitizeCode(s int) string { return "511" default: - return strconv.Itoa(s) + if s >= 100 && s <= 599 { + return strconv.Itoa(s) + } + return "unknown" } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go new file mode 100644 index 00000000000..35e41bd1e6b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -0,0 +1,31 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +// Option are used to configure a middleware or round tripper.. +type Option func(*option) + +type option struct { + extraMethods []string +} + +// WithExtraMethods adds additional HTTP methods to the list of allowed methods. +// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. +// +// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. +func WithExtraMethods(methods ...string) Option { + return func(o *option) { + o.extraMethods = methods + } +} diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml index 2f3351d7ae5..a49fff15ac1 100644 --- a/vendor/github.com/russross/blackfriday/.travis.yml +++ b/vendor/github.com/russross/blackfriday/.travis.yml @@ -3,6 +3,7 @@ language: go go: - "1.9.x" - "1.10.x" + - "1.11.x" - tip matrix: fast_finish: true diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt index 2885af3602d..7fbb253a8ec 100644 --- a/vendor/github.com/russross/blackfriday/LICENSE.txt +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -1,29 +1,28 @@ Blackfriday is distributed under the Simplified BSD License: -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. +Copyright © 2011 Russ Ross +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with + the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md index 3c62e137533..997ef5d4299 100644 --- a/vendor/github.com/russross/blackfriday/README.md +++ b/vendor/github.com/russross/blackfriday/README.md @@ -1,6 +1,6 @@ Blackfriday -[![Build Status][BuildSVG]][BuildURL] -[![Godoc][GodocV2SVG]][GodocV2URL] +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] =========== Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It @@ -18,12 +18,21 @@ It started as a translation from C of [Sundown][3]. Installation ------------ -Blackfriday is compatible with any modern Go release. With Go and git installed: +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: - go get -u gopkg.in/russross/blackfriday.v2 + go get github.com/russross/blackfriday -will download, compile, and install the package into your `$GOPATH` directory -hierarchy. +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday" + +and `go get` without parameters. + +Old versions of Go and legacy GOPATH mode might work, +but no effort is made to keep them working. Versions @@ -32,13 +41,9 @@ Versions Currently maintained and recommended version of Blackfriday is `v2`. It's being developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. +https://pkg.go.dev/github.com/russross/blackfriday/v2. -It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using package management tool like [dep][7] or -[Glide][8] and make use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. Version 2 offers a number of improvements over v1: @@ -60,22 +65,7 @@ Potential drawbacks: If you are still interested in the legacy `v1`, you can import it from `github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://godoc.org/github.com/russross/blackfriday - -### Known issue with `dep` - -There is a known problem with using Blackfriday v1 _transitively_ and `dep`. 
-Currently `dep` prioritizes semver versions over anything else, and picks the -latest one, plus it does not apply a `[[constraint]]` specifier to transitively -pulled in packages. So if you're using something that uses Blackfriday v1, but -that something does not use `dep` yet, you will get Blackfriday v2 pulled in and -your first dependency will fail to build. - -There are couple of fixes for it, documented here: -https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version - -Meanwhile, `dep` team is working on a more general solution to the constraints -on transitive dependencies problem: https://github.com/golang/dep/issues/1124. +here: https://pkg.go.dev/github.com/russross/blackfriday. Usage @@ -86,12 +76,16 @@ Usage For basic usage, it is as simple as getting your input into a byte slice and calling: - output := blackfriday.MarkdownBasic(input) +```go +output := blackfriday.MarkdownBasic(input) +``` This renders it with no extensions enabled. To get a more useful feature set, use this instead: - output := blackfriday.MarkdownCommon(input) +```go +output := blackfriday.MarkdownCommon(input) +``` ### v2 @@ -121,7 +115,7 @@ Here's an example of simple usage of Blackfriday together with Bluemonday: ```go import ( "github.com/microcosm-cc/bluemonday" - "gopkg.in/russross/blackfriday.v2" + "github.com/russross/blackfriday" ) // ... @@ -154,7 +148,7 @@ markdown file using a standalone program. You can also browse the source directly on github if you are just looking for some example code: -* +* Note that if you have not already done so, installing `blackfriday-tool` will be sufficient to download and install @@ -171,12 +165,12 @@ anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The algorithm has a specification, so that other packages can create compatible anchor names and links to those anchors. -The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. -[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to create compatible links to the anchor names generated by blackfriday. This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients that want a small package and don't need full functionality of blackfriday. @@ -246,7 +240,7 @@ implements the following extensions: and supply a language (to make syntax highlighting simple). 
Just mark it like this: - ``` go + ```go func getTrue() bool { return true } @@ -258,7 +252,7 @@ implements the following extensions: To preserve classes of fenced code blocks while using the bluemonday HTML sanitizer, use the following policy: - ``` go + ```go p := bluemonday.UGCPolicy() p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") html := p.SanitizeBytes(unsafe) @@ -269,7 +263,7 @@ implements the following extensions: Cat : Fluffy animal everyone likes - + Internet : Vector of transmission for pictures of cats @@ -280,7 +274,7 @@ implements the following extensions: end of the document. A footnote looks like this: This is a footnote.[^1] - + [^1]: the footnote text. * **Autolinking**. Blackfriday can find URLs that have not been @@ -317,7 +311,7 @@ Other renderers Blackfriday is structured to allow alternative rendering engines. Here are a few of note: -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): provides a GitHub Flavored Markdown renderer with fenced code block highlighting, clickable heading anchor links. @@ -328,7 +322,7 @@ are a few of note: * [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, but for markdown. -* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): renders output as LaTeX. * [bfchroma](https://github.com/Depado/bfchroma/): provides convenience @@ -337,6 +331,10 @@ are a few of note: provides a drop-in renderer ready to use with Blackfriday, as well as options and means for further customization. +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. 
+ +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + TODO ---- @@ -357,13 +355,10 @@ License [1]: https://daringfireball.net/projects/markdown/ "Markdown" [2]: https://golang.org/ "Go Language" [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" - [7]: https://github.com/golang/dep/ "dep" - [8]: https://github.com/Masterminds/glide "Glide" - - [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master - [BuildURL]: https://travis-ci.org/russross/blackfriday - [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg - [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go index 45c21a6c267..563cb290384 100644 --- a/vendor/github.com/russross/blackfriday/block.go +++ b/vendor/github.com/russross/blackfriday/block.go @@ -649,14 +649,17 @@ func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bo } i = skipChar(data, i, ' ') - if i >= len(data) || data[i] != '\n' { - if newlineOptional && i == len(data) { + if i >= len(data) { + if newlineOptional { return i, marker } return 0, "" } + if data[i] == '\n' { + i++ // Take newline into account + } - return i + 1, marker // Take newline into account. 
+ return i, marker } // fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, @@ -1133,6 +1136,15 @@ func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { i++ } + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + if p.flags&EXTENSION_FENCED_CODE != 0 && i > line { + // determine if codeblock starts on the first line + _, codeBlockMarker = isFenceLine(data[line:i], nil, "", false) + } + // get working buffer var raw bytes.Buffer @@ -1140,11 +1152,6 @@ func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { raw.Write(data[line:i]) line = i - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - gatherlines: for line < len(data) { i++ @@ -1153,7 +1160,6 @@ gatherlines: for data[i-1] != '\n' { i++ } - // if it is an empty line, guess that it is part of this item // and move on to the next line if p.isEmpty(data[line:i]) > 0 { diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go index e0a6c69c96d..fa044ca2152 100644 --- a/vendor/github.com/russross/blackfriday/html.go +++ b/vendor/github.com/russross/blackfriday/html.go @@ -32,6 +32,7 @@ const ( HTML_SAFELINK // only link to trusted protocols HTML_NOFOLLOW_LINKS // only link with rel="nofollow" HTML_NOREFERRER_LINKS // only link with rel="noreferrer" + HTML_NOOPENER_LINKS // only link with rel="noopener" HTML_HREF_TARGET_BLANK // add a blank target HTML_TOC // generate a table of contents HTML_OMIT_CONTENTS // skip the main contents (for a standalone table of contents) @@ -445,6 +446,9 @@ func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) { if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) { relAttrs = append(relAttrs, "noreferrer") } + if options.flags&HTML_NOOPENER_LINKS != 0 && !isRelativeLink(link) { + relAttrs = append(relAttrs, "noopener") + } if len(relAttrs) > 0 { out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " "))) } @@ -559,6 +563,9 @@ func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) { relAttrs = append(relAttrs, "noreferrer") } + if options.flags&HTML_NOOPENER_LINKS != 0 && !isRelativeLink(link) { + relAttrs = append(relAttrs, "noopener") + } if len(relAttrs) > 0 { out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " "))) } diff --git a/vendor/github.com/russross/blackfriday/inline.go b/vendor/github.com/russross/blackfriday/inline.go index 4483b8f19fd..27056ebcad3 100644 --- a/vendor/github.com/russross/blackfriday/inline.go +++ b/vendor/github.com/russross/blackfriday/inline.go @@ -252,7 +252,7 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int { case data[i] == '\n': textHasNl = true - case data[i-1] == '\\': + case isBackslashEscaped(data, i): continue case data[i] == '[': diff --git a/vendor/github.com/russross/blackfriday/markdown.go b/vendor/github.com/russross/blackfriday/markdown.go index 41595d62d08..a0349cafecb 100644 --- a/vendor/github.com/russross/blackfriday/markdown.go +++ b/vendor/github.com/russross/blackfriday/markdown.go @@ -132,6 +132,7 @@ var blockTags = map[string]struct{}{ "article": {}, "aside": {}, "canvas": {}, + "details": {}, "figcaption": {}, "figure": {}, "footer": {}, @@ -142,6 +143,7 @@ var blockTags = map[string]struct{}{ "output": {}, "progress": {}, "section": {}, + 
"summary": {}, "video": {}, } diff --git a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml index feef144d130..5240d462280 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/.travis.yml +++ b/vendor/github.com/seccomp/libseccomp-golang/.travis.yml @@ -19,19 +19,39 @@ os: language: go +jobs: + include: + - name: "last libseccomp 2.5.0" + env: + - SECCOMP_VER=2.5.0 + - SECCOMP_SHA256SUM=1ffa7038d2720ad191919816db3479295a4bcca1ec14e02f672539f4983014f3 + - name: "compat libseccomp 2.4.4" + env: + - SECCOMP_VER=2.4.4 + - SECCOMP_SHA256SUM=4e79738d1ef3c9b7ca9769f1f8b8d84fc17143c2c1c432e53b9c64787e0ff3eb + - name: "compat libseccomp 2.2.1" + env: + - SECCOMP_VER=2.2.1 + - SECCOMP_SHA256SUM=0ba1789f54786c644af54cdffc9fd0dd0a8bb2b2ee153933f658855d2851a740 + addons: apt: packages: - build-essential - # TODO: use the main libseccomp git repo instead of a distro package - - libseccomp2 - - libseccomp-dev + - astyle + - golint + - gperf install: - go get -u golang.org/x/lint/golint # run all of the tests independently, fail if any of the tests error script: + - wget https://github.com/seccomp/libseccomp/releases/download/v$SECCOMP_VER/libseccomp-$SECCOMP_VER.tar.gz + - echo $SECCOMP_SHA256SUM libseccomp-$SECCOMP_VER.tar.gz | sha256sum -c + - tar xf libseccomp-$SECCOMP_VER.tar.gz + - pushd libseccomp-$SECCOMP_VER && ./configure --prefix=/opt/libseccomp-$SECCOMP_VER && make && sudo make install && popd - make check-syntax - make lint - - make check + - PKG_CONFIG_PATH=/opt/libseccomp-$SECCOMP_VER/lib/pkgconfig LD_LIBRARY_PATH=/opt/libseccomp-$SECCOMP_VER/lib make vet + - PKG_CONFIG_PATH=/opt/libseccomp-$SECCOMP_VER/lib/pkgconfig LD_LIBRARY_PATH=/opt/libseccomp-$SECCOMP_VER/lib make test diff --git a/vendor/github.com/seccomp/libseccomp-golang/Makefile b/vendor/github.com/seccomp/libseccomp-golang/Makefile index 1ff4cc89859..38cfa852cd6 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/Makefile +++ b/vendor/github.com/seccomp/libseccomp-golang/Makefile @@ -18,8 +18,14 @@ fix-syntax: vet: go vet -v +# Previous bugs have made the tests freeze until the timeout. Golang default +# timeout for tests is 10 minutes, which is too long, considering current tests +# can be executed in less than 1 second. Reduce the timeout, so problems can +# be noticed earlier in the CI. +TEST_TIMEOUT=10s + test: - go test -v + go test -v -timeout $(TEST_TIMEOUT) lint: @$(if $(shell which golint),true,$(error "install golint and include it in your PATH")) diff --git a/vendor/github.com/seccomp/libseccomp-golang/README.md b/vendor/github.com/seccomp/libseccomp-golang/README.md index 27423f2d97f..806a5ddf29b 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/README.md +++ b/vendor/github.com/seccomp/libseccomp-golang/README.md @@ -2,7 +2,7 @@ =============================================================================== https://github.com/seccomp/libseccomp-golang -[![Build Status](https://img.shields.io/travis/seccomp/libseccomp-golang/master.svg)](https://travis-ci.org/seccomp/libseccomp-golang) +[![Build Status](https://img.shields.io/travis/seccomp/libseccomp-golang/main.svg)](https://travis-ci.org/seccomp/libseccomp-golang) The libseccomp library provides an easy to use, platform independent, interface to the Linux Kernel's syscall filtering mechanism. 
The libseccomp API is diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go index e489b9ebd4a..e9b92e2219a 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/seccomp.go +++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp.go @@ -20,6 +20,13 @@ import ( // C wrapping code +// To compile libseccomp-golang against a specific version of libseccomp: +// cd ../libseccomp && mkdir -p prefix +// ./configure --prefix=$PWD/prefix && make && make install +// cd ../libseccomp-golang +// PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make +// LD_PRELOAD=$PWD/../libseccomp/prefix/lib/libseccomp.so.2.5.0 PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make test + // #cgo pkg-config: libseccomp // #include // #include @@ -34,19 +41,25 @@ type VersionError struct { minimum string } +func init() { + // This forces the cgo libseccomp to initialize its internal API support state, + // which is necessary on older versions of libseccomp in order to work + // correctly. + GetAPI() +} + func (e VersionError) Error() string { - format := "Libseccomp version too low: " + messageStr := "" if e.message != "" { - format += e.message + ": " + messageStr = e.message + ": " } - format += "minimum supported is " + minimumStr := "" if e.minimum != "" { - format += e.minimum + ": " + minimumStr = e.minimum } else { - format += "2.2.0: " + minimumStr = "2.2.0" } - format += "detected %d.%d.%d" - return fmt.Sprintf(format, verMajor, verMinor, verMicro) + return fmt.Sprintf("Libseccomp version too low: %sminimum supported is %s: detected %d.%d.%d", messageStr, minimumStr, verMajor, verMinor, verMicro) } // ScmpArch represents a CPU architecture. Seccomp can restrict syscalls on a @@ -69,9 +82,61 @@ type ScmpCondition struct { Operand2 uint64 `json:"operand_two,omitempty"` } -// ScmpSyscall represents a Linux System Call +// Seccomp userspace notification structures associated with filters that use the ActNotify action. + +// ScmpSyscall identifies a Linux System Call by its number. type ScmpSyscall int32 +// ScmpFd represents a file-descriptor used for seccomp userspace notifications. +type ScmpFd int32 + +// ScmpNotifData describes the system call context that triggered a notification. +// +// Syscall: the syscall number +// Arch: the filter architecture +// InstrPointer: address of the instruction that triggered a notification +// Args: arguments (up to 6) for the syscall +// +type ScmpNotifData struct { + Syscall ScmpSyscall `json:"syscall,omitempty"` + Arch ScmpArch `json:"arch,omitempty"` + InstrPointer uint64 `json:"instr_pointer,omitempty"` + Args []uint64 `json:"args,omitempty"` +} + +// ScmpNotifReq represents a seccomp userspace notification. See NotifReceive() for +// info on how to pull such a notification. +// +// ID: notification ID +// Pid: process that triggered the notification event +// Flags: filter flags (see seccomp(2)) +// Data: system call context that triggered the notification +// +type ScmpNotifReq struct { + ID uint64 `json:"id,omitempty"` + Pid uint32 `json:"pid,omitempty"` + Flags uint32 `json:"flags,omitempty"` + Data ScmpNotifData `json:"data,omitempty"` +} + +// ScmpNotifResp represents a seccomp userspace notification response. See NotifRespond() +// for info on how to push such a response. +// +// ID: notification ID (must match the corresponding ScmpNotifReq ID) +// Error: must be 0 if no error occurred, or an error constant from package +// syscall (e.g., syscall.EPERM, etc). 
In the latter case, it's used +// as an error return from the syscall that created the notification. +// Val: return value for the syscall that created the notification. Only +// relevant if Error is 0. +// Flags: userspace notification response flag (e.g., NotifRespFlagContinue) +// +type ScmpNotifResp struct { + ID uint64 `json:"id,omitempty"` + Error int32 `json:"error,omitempty"` + Val uint64 `json:"val,omitempty"` + Flags uint32 `json:"flags,omitempty"` +} + // Exported Constants const ( @@ -117,6 +182,10 @@ const ( ArchS390 ScmpArch = iota // ArchS390X represents 64-bit System z/390 syscalls ArchS390X ScmpArch = iota + // ArchPARISC represents 32-bit PA-RISC + ArchPARISC ScmpArch = iota + // ArchPARISC64 represents 64-bit PA-RISC + ArchPARISC64 ScmpArch = iota ) const ( @@ -130,6 +199,9 @@ const ( ActKill ScmpAction = iota // ActTrap throws SIGSYS ActTrap ScmpAction = iota + // ActNotify triggers a userspace notification. This action is only usable when + // libseccomp API level 6 or higher is supported. + ActNotify ScmpAction = iota // ActErrno causes the syscall to return a negative error code. This // code can be set with the SetReturnCode method ActErrno ScmpAction = iota @@ -181,6 +253,21 @@ const ( CompareMaskedEqual ScmpCompareOp = iota ) +var ( + // ErrSyscallDoesNotExist represents an error condition where + // libseccomp is unable to resolve the syscall + ErrSyscallDoesNotExist = fmt.Errorf("could not resolve syscall name") +) + +const ( + // Userspace notification response flags + + // NotifRespFlagContinue tells the kernel to continue executing the system + // call that triggered the notification. Must only be used when the notication + // response's error is 0. + NotifRespFlagContinue uint32 = 1 +) + // Helpers for types // GetArchFromString returns an ScmpArch constant from a string representing an @@ -223,6 +310,10 @@ func GetArchFromString(arch string) (ScmpArch, error) { return ArchS390, nil case "s390x": return ArchS390X, nil + case "parisc": + return ArchPARISC, nil + case "parisc64": + return ArchPARISC64, nil default: return ArchInvalid, fmt.Errorf("cannot convert unrecognized string %q", arch) } @@ -263,6 +354,10 @@ func (a ScmpArch) String() string { return "s390" case ArchS390X: return "s390x" + case ArchPARISC: + return "parisc" + case ArchPARISC64: + return "parisc64" case ArchNative: return "native" case ArchInvalid: @@ -310,6 +405,8 @@ func (a ScmpAction) String() string { case ActTrace: return fmt.Sprintf("Action: Notify tracing processes with code %d", (a >> 16)) + case ActNotify: + return "Action: Notify userspace" case ActLog: return "Action: Log system call" case ActAllow: @@ -349,7 +446,7 @@ func GetLibraryVersion() (major, minor, micro uint) { // Returns a positive int containing the API level, or 0 with an error if the // API level could not be detected due to the library being older than v2.4.0. // See the seccomp_api_get(3) man page for details on available API levels: -// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3 +// https://github.com/seccomp/libseccomp/blob/main/doc/man/man3/seccomp_api_get.3 func GetAPI() (uint, error) { return getAPI() } @@ -359,7 +456,7 @@ func GetAPI() (uint, error) { // Returns an error if the API level could not be set. 
An error is always // returned if the library is older than v2.4.0 // See the seccomp_api_get(3) man page for details on available API levels: -// https://github.com/seccomp/libseccomp/blob/master/doc/man/man3/seccomp_api_get.3 +// https://github.com/seccomp/libseccomp/blob/main/doc/man/man3/seccomp_api_get.3 func SetAPI(api uint) error { return setAPI(api) } @@ -386,7 +483,7 @@ func (s ScmpSyscall) GetNameByArch(arch ScmpArch) (string, error) { cString := C.seccomp_syscall_resolve_num_arch(arch.toNative(), C.int(s)) if cString == nil { - return "", fmt.Errorf("could not resolve syscall name for %#x", int32(s)) + return "", ErrSyscallDoesNotExist } defer C.free(unsafe.Pointer(cString)) @@ -409,7 +506,7 @@ func GetSyscallFromName(name string) (ScmpSyscall, error) { result := C.seccomp_syscall_resolve_name(cString) if result == scmpError { - return 0, fmt.Errorf("could not resolve name to syscall: %q", name) + return 0, ErrSyscallDoesNotExist } return ScmpSyscall(result), nil @@ -433,7 +530,7 @@ func GetSyscallFromNameByArch(name string, arch ScmpArch) (ScmpSyscall, error) { result := C.seccomp_syscall_resolve_name_arch(arch.toNative(), cString) if result == scmpError { - return 0, fmt.Errorf("could not resolve name to syscall: %q on %v", name, arch) + return 0, ErrSyscallDoesNotExist } return ScmpSyscall(result), nil @@ -506,11 +603,10 @@ type ScmpFilter struct { lock sync.Mutex } -// NewFilter creates and returns a new filter context. -// Accepts a default action to be taken for syscalls which match no rules in -// the filter. -// Returns a reference to a valid filter context, or nil and an error if the -// filter context could not be created or an invalid default action was given. +// NewFilter creates and returns a new filter context. Accepts a default action to be +// taken for syscalls which match no rules in the filter. +// Returns a reference to a valid filter context, or nil and an error +// if the filter context could not be created or an invalid default action was given. func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) { if err := ensureSupportedVersion(); err != nil { return nil, err @@ -530,8 +626,8 @@ func NewFilter(defaultAction ScmpAction) (*ScmpFilter, error) { filter.valid = true runtime.SetFinalizer(filter, filterFinalizer) - // Enable TSync so all goroutines will receive the same rules - // If the kernel does not support TSYNC, allow us to continue without error + // Enable TSync so all goroutines will receive the same rules. + // If the kernel does not support TSYNC, allow us to continue without error. if err := filter.setFilterAttr(filterAttrTsync, 0x1); err != nil && err != syscall.ENOTSUP { filter.Release() return nil, fmt.Errorf("could not create filter - error setting tsync bit: %v", err) @@ -778,8 +874,9 @@ func (f *ScmpFilter) GetNoNewPrivsBit() (bool, error) { func (f *ScmpFilter) GetLogBit() (bool, error) { log, err := f.getFilterAttr(filterAttrLog) if err != nil { - api, apiErr := getAPI() - if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) { + // Ignore error, if not supported returns apiLevel == 0 + apiLevel, _ := GetAPI() + if apiLevel < 3 { return false, fmt.Errorf("getting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher") } @@ -793,6 +890,30 @@ func (f *ScmpFilter) GetLogBit() (bool, error) { return true, nil } +// GetSSB returns the current state the SSB bit will be set to on the filter +// being loaded, or an error if an issue was encountered retrieving the value. 
+// The SSB bit tells the kernel that a seccomp user is not interested in enabling +// Speculative Store Bypass mitigation. +// The SSB bit is only usable when libseccomp API level 4 or higher is +// supported. +func (f *ScmpFilter) GetSSB() (bool, error) { + ssb, err := f.getFilterAttr(filterAttrSSB) + if err != nil { + api, apiErr := getAPI() + if (apiErr != nil && api == 0) || (apiErr == nil && api < 4) { + return false, fmt.Errorf("getting the SSB flag is only supported in libseccomp 2.5.0 and newer with API level 4 or higher") + } + + return false, err + } + + if ssb == 0 { + return false, nil + } + + return true, nil +} + // SetBadArchAction sets the default action taken on a syscall for an // architecture not in the filter, or an error if an issue was encountered // setting the value. @@ -832,8 +953,9 @@ func (f *ScmpFilter) SetLogBit(state bool) error { err := f.setFilterAttr(filterAttrLog, toSet) if err != nil { - api, apiErr := getAPI() - if (apiErr != nil && api == 0) || (apiErr == nil && api < 3) { + // Ignore error, if not supported returns apiLevel == 0 + apiLevel, _ := GetAPI() + if apiLevel < 3 { return fmt.Errorf("setting the log bit is only supported in libseccomp 2.4.0 and newer with API level 3 or higher") } } @@ -841,6 +963,28 @@ func (f *ScmpFilter) SetLogBit(state bool) error { return err } +// SetSSB sets the state of the SSB bit, which will be applied on filter +// load, or an error if an issue was encountered setting the value. +// The SSB bit is only usable when libseccomp API level 4 or higher is +// supported. +func (f *ScmpFilter) SetSSB(state bool) error { + var toSet C.uint32_t = 0x0 + + if state { + toSet = 0x1 + } + + err := f.setFilterAttr(filterAttrSSB, toSet) + if err != nil { + api, apiErr := getAPI() + if (apiErr != nil && api == 0) || (apiErr == nil && api < 4) { + return fmt.Errorf("setting the SSB flag is only supported in libseccomp 2.5.0 and newer with API level 4 or higher") + } + } + + return err +} + // SetSyscallPriority sets a syscall's priority. // This provides a hint to the filter generator in libseccomp about the // importance of this syscall. High-priority syscalls are placed @@ -947,3 +1091,36 @@ func (f *ScmpFilter) ExportBPF(file *os.File) error { return nil } + +// Userspace Notification API + +// GetNotifFd returns the userspace notification file descriptor associated with the given +// filter context. Such a file descriptor is only valid after the filter has been loaded +// and only when the filter uses the ActNotify action. The file descriptor can be used to +// retrieve and respond to notifications associated with the filter (see NotifReceive(), +// NotifRespond(), and NotifIDValid()). +func (f *ScmpFilter) GetNotifFd() (ScmpFd, error) { + return f.getNotifFd() +} + +// NotifReceive retrieves a seccomp userspace notification from a filter whose ActNotify +// action has triggered. The caller is expected to process the notification and return a +// response via NotifRespond(). Each invocation of this function returns one +// notification. As multiple notifications may be pending at any time, this function is +// normally called within a polling loop. +func NotifReceive(fd ScmpFd) (*ScmpNotifReq, error) { + return notifReceive(fd) +} + +// NotifRespond responds to a notification retrieved via NotifReceive(). The response Id +// must match that of the corresponding notification retrieved via NotifReceive(). 
+func NotifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error { + return notifRespond(fd, scmpResp) +} + +// NotifIDValid checks if a notification is still valid. An return value of nil means the +// notification is still valid. Otherwise the notification is not valid. This can be used +// to mitigate time-of-check-time-of-use (TOCTOU) attacks as described in seccomp_notify_id_valid(2). +func NotifIDValid(fd ScmpFd, id uint64) error { + return notifIDValid(fd, id) +} diff --git a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go index 0982e930f9e..8dc7b296f3b 100644 --- a/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go +++ b/vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go @@ -14,6 +14,13 @@ import ( // Get the seccomp header in scope // Need stdlib.h for free() on cstrings +// To compile libseccomp-golang against a specific version of libseccomp: +// cd ../libseccomp && mkdir -p prefix +// ./configure --prefix=$PWD/prefix && make && make install +// cd ../libseccomp-golang +// PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make +// LD_PRELOAD=$PWD/../libseccomp/prefix/lib/libseccomp.so.2.5.0 PKG_CONFIG_PATH=$PWD/../libseccomp/prefix/lib/pkgconfig/ make test + // #cgo pkg-config: libseccomp /* #include @@ -50,6 +57,14 @@ const uint32_t C_ARCH_BAD = ARCH_BAD; #define SCMP_ARCH_S390X ARCH_BAD #endif +#ifndef SCMP_ARCH_PARISC +#define SCMP_ARCH_PARISC ARCH_BAD +#endif + +#ifndef SCMP_ARCH_PARISC64 +#define SCMP_ARCH_PARISC64 ARCH_BAD +#endif + const uint32_t C_ARCH_NATIVE = SCMP_ARCH_NATIVE; const uint32_t C_ARCH_X86 = SCMP_ARCH_X86; const uint32_t C_ARCH_X86_64 = SCMP_ARCH_X86_64; @@ -67,6 +82,8 @@ const uint32_t C_ARCH_PPC64 = SCMP_ARCH_PPC64; const uint32_t C_ARCH_PPC64LE = SCMP_ARCH_PPC64LE; const uint32_t C_ARCH_S390 = SCMP_ARCH_S390; const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X; +const uint32_t C_ARCH_PARISC = SCMP_ARCH_PARISC; +const uint32_t C_ARCH_PARISC64 = SCMP_ARCH_PARISC64; #ifndef SCMP_ACT_LOG #define SCMP_ACT_LOG 0x7ffc0000U @@ -80,6 +97,10 @@ const uint32_t C_ARCH_S390X = SCMP_ARCH_S390X; #define SCMP_ACT_KILL_THREAD 0x00000000U #endif +#ifndef SCMP_ACT_NOTIFY +#define SCMP_ACT_NOTIFY 0x7fc00000U +#endif + const uint32_t C_ACT_KILL = SCMP_ACT_KILL; const uint32_t C_ACT_KILL_PROCESS = SCMP_ACT_KILL_PROCESS; const uint32_t C_ACT_KILL_THREAD = SCMP_ACT_KILL_THREAD; @@ -88,6 +109,7 @@ const uint32_t C_ACT_ERRNO = SCMP_ACT_ERRNO(0); const uint32_t C_ACT_TRACE = SCMP_ACT_TRACE(0); const uint32_t C_ACT_LOG = SCMP_ACT_LOG; const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW; +const uint32_t C_ACT_NOTIFY = SCMP_ACT_NOTIFY; // The libseccomp SCMP_FLTATR_CTL_LOG member of the scmp_filter_attr enum was // added in v2.4.0 @@ -95,12 +117,16 @@ const uint32_t C_ACT_ALLOW = SCMP_ACT_ALLOW; (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 4) #define SCMP_FLTATR_CTL_LOG _SCMP_FLTATR_MIN #endif +#if SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5 +#define SCMP_FLTATR_CTL_SSB _SCMP_FLTATR_MIN +#endif const uint32_t C_ATTRIBUTE_DEFAULT = (uint32_t)SCMP_FLTATR_ACT_DEFAULT; const uint32_t C_ATTRIBUTE_BADARCH = (uint32_t)SCMP_FLTATR_ACT_BADARCH; const uint32_t C_ATTRIBUTE_NNP = (uint32_t)SCMP_FLTATR_CTL_NNP; const uint32_t C_ATTRIBUTE_TSYNC = (uint32_t)SCMP_FLTATR_CTL_TSYNC; const uint32_t C_ATTRIBUTE_LOG = (uint32_t)SCMP_FLTATR_CTL_LOG; +const uint32_t C_ATTRIBUTE_SSB = (uint32_t)SCMP_FLTATR_CTL_SSB; const int C_CMP_NE = (int)SCMP_CMP_NE; const int C_CMP_LT = (int)SCMP_CMP_LT; @@ -189,6 +215,51 @@ void 
add_struct_arg_cmp( return; } + +// The seccomp notify API functions were added in v2.5.0 +#if (SCMP_VER_MAJOR < 2) || \ + (SCMP_VER_MAJOR == 2 && SCMP_VER_MINOR < 5) + +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +int seccomp_notify_alloc(struct seccomp_notif **req, struct seccomp_notif_resp **resp) { + return -EOPNOTSUPP; +} +int seccomp_notify_fd(const scmp_filter_ctx ctx) { + return -EOPNOTSUPP; +} +void seccomp_notify_free(struct seccomp_notif *req, struct seccomp_notif_resp *resp) { +} +int seccomp_notify_id_valid(int fd, uint64_t id) { + return -EOPNOTSUPP; +} +int seccomp_notify_receive(int fd, struct seccomp_notif *req) { + return -EOPNOTSUPP; +} +int seccomp_notify_respond(int fd, struct seccomp_notif_resp *resp) { + return -EOPNOTSUPP; +} + +#endif */ import "C" @@ -203,6 +274,7 @@ const ( filterAttrNNP scmpFilterAttr = iota filterAttrTsync scmpFilterAttr = iota filterAttrLog scmpFilterAttr = iota + filterAttrSSB scmpFilterAttr = iota ) const ( @@ -210,7 +282,7 @@ const ( scmpError C.int = -1 // Comparison boundaries to check for architecture validity archStart ScmpArch = ArchNative - archEnd ScmpArch = ArchS390X + archEnd ScmpArch = ArchPARISC64 // Comparison boundaries to check for action validity actionStart ScmpAction = ActKill actionEnd ScmpAction = ActKillProcess @@ -460,6 +532,10 @@ func archFromNative(a C.uint32_t) (ScmpArch, error) { return ArchS390, nil case C.C_ARCH_S390X: return ArchS390X, nil + case C.C_ARCH_PARISC: + return ArchPARISC, nil + case C.C_ARCH_PARISC64: + return ArchPARISC64, nil default: return 0x0, fmt.Errorf("unrecognized architecture %#x", uint32(a)) } @@ -500,6 +576,10 @@ func (a ScmpArch) toNative() C.uint32_t { return C.C_ARCH_S390 case ArchS390X: return C.C_ARCH_S390X + case ArchPARISC: + return C.C_ARCH_PARISC + case ArchPARISC64: + return C.C_ARCH_PARISC64 case ArchNative: return C.C_ARCH_NATIVE default: @@ -548,6 +628,8 @@ func actionFromNative(a C.uint32_t) (ScmpAction, error) { return ActLog, nil case C.C_ACT_ALLOW: return ActAllow, nil + case C.C_ACT_NOTIFY: + return ActNotify, nil default: return 0x0, fmt.Errorf("unrecognized action %#x", uint32(a)) } @@ -572,6 +654,8 @@ func (a ScmpAction) toNative() C.uint32_t { return C.C_ACT_LOG case ActAllow: return C.C_ACT_ALLOW + case ActNotify: + return C.C_ACT_NOTIFY default: return 0x0 } @@ -590,7 +674,181 @@ func (a scmpFilterAttr) toNative() uint32 { return uint32(C.C_ATTRIBUTE_TSYNC) case filterAttrLog: return uint32(C.C_ATTRIBUTE_LOG) + case filterAttrSSB: + return uint32(C.C_ATTRIBUTE_SSB) default: return 0x0 } } + +func (a ScmpSyscall) toNative() C.uint32_t { + return C.uint32_t(a) +} + +func syscallFromNative(a C.int) ScmpSyscall { + return ScmpSyscall(a) +} + +func notifReqFromNative(req *C.struct_seccomp_notif) (*ScmpNotifReq, error) { + scmpArgs := make([]uint64, 6) + for i := 0; i < len(scmpArgs); i++ { + scmpArgs[i] = uint64(req.data.args[i]) + } + + arch, err := archFromNative(req.data.arch) + if err != nil { + return nil, err + } + + scmpData := ScmpNotifData{ + Syscall: syscallFromNative(req.data.nr), + Arch: arch, + InstrPointer: uint64(req.data.instruction_pointer), + Args: scmpArgs, + } + + scmpReq := &ScmpNotifReq{ + ID: uint64(req.id), + Pid: uint32(req.pid), + Flags: uint32(req.flags), + Data: scmpData, + } + + return scmpReq, nil +} + 
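// Editor's note (not part of the vendored patch): the functions above and below wire up the
// userspace-notification support added in this libseccomp-golang bump. What follows is a
// hypothetical, minimal usage sketch; it assumes a kernel and libseccomp with API level >= 6,
// and the traced syscall ("openat") and function name watchOpenat are illustrative only. In
// practice the notification fd is usually handed to a separate supervisor process so the
// handler cannot deadlock on its own notified syscalls.

package main

import (
	"log"

	seccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	if err := watchOpenat(); err != nil {
		log.Fatal(err)
	}
}

func watchOpenat() error {
	// Allow everything by default, but have "openat" generate a userspace notification.
	filter, err := seccomp.NewFilter(seccomp.ActAllow)
	if err != nil {
		return err
	}
	sc, err := seccomp.GetSyscallFromName("openat")
	if err != nil {
		return err
	}
	if err := filter.AddRule(sc, seccomp.ActNotify); err != nil {
		return err
	}
	if err := filter.Load(); err != nil {
		return err
	}
	fd, err := filter.GetNotifFd()
	if err != nil {
		return err
	}
	for {
		req, err := seccomp.NotifReceive(fd)
		if err != nil {
			return err
		}
		// Guard against the notifying task dying between receive and respond (TOCTOU).
		if err := seccomp.NotifIDValid(fd, req.ID); err != nil {
			continue
		}
		// Tell the kernel to let the original syscall continue unmodified.
		resp := &seccomp.ScmpNotifResp{ID: req.ID, Flags: seccomp.NotifRespFlagContinue}
		if err := seccomp.NotifRespond(fd, resp); err != nil {
			return err
		}
	}
}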
+func (scmpResp *ScmpNotifResp) toNative(resp *C.struct_seccomp_notif_resp) { + resp.id = C.__u64(scmpResp.ID) + resp.val = C.__s64(scmpResp.Val) + resp.error = (C.__s32(scmpResp.Error) * -1) // kernel requires a negated value + resp.flags = C.__u32(scmpResp.Flags) +} + +// Userspace Notification API +// Calls to C.seccomp_notify* hidden from seccomp.go + +func (f *ScmpFilter) getNotifFd() (ScmpFd, error) { + f.lock.Lock() + defer f.lock.Unlock() + + if !f.valid { + return -1, errBadFilter + } + + // Ignore error, if not supported returns apiLevel == 0 + apiLevel, _ := GetAPI() + if apiLevel < 6 { + return -1, fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + } + + fd := C.seccomp_notify_fd(f.filterCtx) + + return ScmpFd(fd), nil +} + +func notifReceive(fd ScmpFd) (*ScmpNotifReq, error) { + var req *C.struct_seccomp_notif + var resp *C.struct_seccomp_notif_resp + + // Ignore error, if not supported returns apiLevel == 0 + apiLevel, _ := GetAPI() + if apiLevel < 6 { + return nil, fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + } + + // we only use the request here; the response is unused + if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 { + return nil, errRc(retCode) + } + + defer func() { + C.seccomp_notify_free(req, resp) + }() + + for { + retCode, errno := C.seccomp_notify_receive(C.int(fd), req) + if retCode == 0 { + break + } + + if errno == syscall.EINTR { + continue + } + + if errno == syscall.ENOENT { + return nil, errno + } + + return nil, errRc(retCode) + } + + return notifReqFromNative(req) +} + +func notifRespond(fd ScmpFd, scmpResp *ScmpNotifResp) error { + var req *C.struct_seccomp_notif + var resp *C.struct_seccomp_notif_resp + + // Ignore error, if not supported returns apiLevel == 0 + apiLevel, _ := GetAPI() + if apiLevel < 6 { + return fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + } + + // we only use the reponse here; the request is discarded + if retCode := C.seccomp_notify_alloc(&req, &resp); retCode != 0 { + return errRc(retCode) + } + + defer func() { + C.seccomp_notify_free(req, resp) + }() + + scmpResp.toNative(resp) + + for { + retCode, errno := C.seccomp_notify_respond(C.int(fd), resp) + if retCode == 0 { + break + } + + if errno == syscall.EINTR { + continue + } + + if errno == syscall.ENOENT { + return errno + } + + return errRc(retCode) + } + + return nil +} + +func notifIDValid(fd ScmpFd, id uint64) error { + // Ignore error, if not supported returns apiLevel == 0 + apiLevel, _ := GetAPI() + if apiLevel < 6 { + return fmt.Errorf("seccomp notification requires API level >= 6; current level = %d", apiLevel) + } + + for { + retCode, errno := C.seccomp_notify_id_valid(C.int(fd), C.uint64_t(id)) + if retCode == 0 { + break + } + + if errno == syscall.EINTR { + continue + } + + if errno == syscall.ENOENT { + return errno + } + + return errRc(retCode) + } + + return nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go index cb25b437575..2a9f2dc3b94 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -34,6 +34,8 @@ const ( DiffInsert Operation = 1 // DiffEqual item represents an equal diff. 
DiffEqual Operation = 0 + //IndexSeparator is used to seperate the array indexes in an index string + IndexSeparator = "," ) // Diff represents one diff operation @@ -120,7 +122,7 @@ func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, d // Restore the prefix and suffix. if len(commonprefix) != 0 { - diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + diffs = append([]Diff{{DiffEqual, string(commonprefix)}}, diffs...) } if len(commonsuffix) != 0 { diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) @@ -165,8 +167,8 @@ func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, dea // Single character string. // After the previous speedup, the character can't be an equality. return []Diff{ - Diff{DiffDelete, string(text1)}, - Diff{DiffInsert, string(text2)}, + {DiffDelete, string(text1)}, + {DiffInsert, string(text2)}, } // Check to see if the problem can be split in two. } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { @@ -193,7 +195,7 @@ func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, dea // diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { // Scan the text on a line-by-line basis first. - text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + text1, text2, linearray := dmp.DiffLinesToRunes(string(text1), string(text2)) diffs := dmp.diffMainRunes(text1, text2, false, deadline) @@ -368,8 +370,8 @@ func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) } // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. return []Diff{ - Diff{DiffDelete, string(runes1)}, - Diff{DiffInsert, string(runes2)}, + {DiffDelete, string(runes1)}, + {DiffInsert, string(runes2)}, } } @@ -390,66 +392,28 @@ func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, // DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. // It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { - chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) - return string(chars1), string(chars2), lineArray -} - -// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. -func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. - lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' - lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 - - chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) - chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) - + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) return chars1, chars2, lineArray } -func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { - return dmp.DiffLinesToRunes(string(text1), string(text2)) -} - -// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. 
-// We use strings instead of []runes as input mainly because you can't use []rune as a map key. -func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. - lineStart := 0 - lineEnd := -1 - runes := []rune{} - - for lineEnd < len(text)-1 { - lineEnd = indexOf(text, "\n", lineStart) - - if lineEnd == -1 { - lineEnd = len(text) - 1 - } - - line := text[lineStart : lineEnd+1] - lineStart = lineEnd + 1 - lineValue, ok := lineHash[line] - - if ok { - runes = append(runes, rune(lineValue)) - } else { - *lineArray = append(*lineArray, line) - lineHash[line] = len(*lineArray) - 1 - runes = append(runes, rune(len(*lineArray)-1)) - } - } - - return runes +// DiffLinesToRunes splits two texts into a list of runes. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2) + return []rune(chars1), []rune(chars2), lineArray } // DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { hydrated := make([]Diff, 0, len(diffs)) for _, aDiff := range diffs { - chars := aDiff.Text + chars := strings.Split(aDiff.Text, IndexSeparator) text := make([]string, len(chars)) for i, r := range chars { - text[i] = lineArray[r] + i1, err := strconv.Atoi(r) + if err == nil { + text[i] = lineArray[i1] + } } aDiff.Text = strings.Join(text, "") @@ -670,16 +634,16 @@ func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { // An insertion or deletion. if diffs[pointer].Type == DiffInsert { - lengthInsertions2 += len(diffs[pointer].Text) + lengthInsertions2 += utf8.RuneCountInString(diffs[pointer].Text) } else { - lengthDeletions2 += len(diffs[pointer].Text) + lengthDeletions2 += utf8.RuneCountInString(diffs[pointer].Text) } // Eliminate an equality that is smaller or equal to the edits on both sides of it. difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) - if len(lastequality) > 0 && - (len(lastequality) <= difference1) && - (len(lastequality) <= difference2) { + if utf8.RuneCountInString(lastequality) > 0 && + (utf8.RuneCountInString(lastequality) <= difference1) && + (utf8.RuneCountInString(lastequality) <= difference2) { // Duplicate record. insPoint := equalities[len(equalities)-1] diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) @@ -728,8 +692,8 @@ func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) if overlapLength1 >= overlapLength2 { - if float64(overlapLength1) >= float64(len(deletion))/2 || - float64(overlapLength1) >= float64(len(insertion))/2 { + if float64(overlapLength1) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength1) >= float64(utf8.RuneCountInString(insertion))/2 { // Overlap found. Insert an equality and trim the surrounding edits. 
diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) @@ -739,8 +703,8 @@ func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { pointer++ } } else { - if float64(overlapLength2) >= float64(len(deletion))/2 || - float64(overlapLength2) >= float64(len(insertion))/2 { + if float64(overlapLength2) >= float64(utf8.RuneCountInString(deletion))/2 || + float64(overlapLength2) >= float64(utf8.RuneCountInString(insertion))/2 { // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. overlap := Diff{DiffEqual, deletion[:overlapLength2]} diffs = splice(diffs, pointer, 0, overlap) @@ -1029,7 +993,7 @@ func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { if x > 0 && diffs[x-1].Type == DiffEqual { diffs[x-1].Text += string(textInsert[:commonlength]) } else { - diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + diffs = append([]Diff{{DiffEqual, string(textInsert[:commonlength])}}, diffs...) pointer++ } textInsert = textInsert[commonlength:] @@ -1343,3 +1307,46 @@ func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Di return diffs, nil } + +// diffLinesToStrings splits two texts into a list of strings. Each string represents one line. +func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + + //Each string has the index of lineArray which it points to + strIndexArray1 := dmp.diffLinesToStringsMunge(text1, &lineArray) + strIndexArray2 := dmp.diffLinesToStringsMunge(text2, &lineArray) + + return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray +} + +// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string. +func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string) []uint32 { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + lineStart := 0 + lineEnd := -1 + strs := []uint32{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + strs = append(strs, uint32(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + strs = append(strs, uint32(len(*lineArray)-1)) + } + } + + return strs +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go index 223c43c4268..0dbe3bdd7de 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -324,7 +324,7 @@ func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { paddingLength := dmp.PatchMargin nullPadding := "" for x := 1; x <= paddingLength; x++ { - nullPadding += string(x) + nullPadding += string(rune(x)) } // Bump all the patches forward. 
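The go-diff hunks above swap the rune-based line encoding for comma-separated index strings (see `IndexSeparator` and `diffLinesToStrings`) while leaving the exported line-mode API unchanged. As a rough, editor-added sketch of the code path this touches (the inputs and output formatting below are illustrative, not taken from this repository):

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	oldText := "alpha\nbeta\ngamma\n"
	newText := "alpha\nbeta\ndelta\n"

	dmp := diffmatchpatch.New()

	// checklines=true runs the line-level pre-pass that the refactor above
	// rewires through diffLinesToStrings / DiffLinesToRunes.
	diffs := dmp.DiffMain(oldText, newText, true)
	diffs = dmp.DiffCleanupSemantic(diffs)

	for _, d := range diffs {
		switch d.Type {
		case diffmatchpatch.DiffInsert:
			fmt.Printf("+ %q\n", d.Text)
		case diffmatchpatch.DiffDelete:
			fmt.Printf("- %q\n", d.Text)
		default:
			fmt.Printf("  %q\n", d.Text)
		}
	}
}
```

The same pre-pass can also be driven explicitly with `DiffLinesToChars` followed by `DiffCharsToLines` when finer control over the line-level diff is needed.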
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index 265f29cc7e5..44c43595478 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -9,6 +9,7 @@ package diffmatchpatch import ( + "strconv" "strings" "unicode/utf8" ) @@ -86,3 +87,20 @@ func runesIndex(r1, r2 []rune) int { } return -1 } + +func intArrayToString(ns []uint32) string { + if len(ns) == 0 { + return "" + } + + indexSeparator := IndexSeparator[0] + + // Appr. 3 chars per num plus the comma. + b := []byte{} + for _, n := range ns { + b = strconv.AppendInt(b, int64(n), 10) + b = append(b, indexSeparator) + } + b = b[:len(b)-1] + return string(b) +} diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS new file mode 100644 index 00000000000..4c5ac3dd997 --- /dev/null +++ b/vendor/github.com/spf13/cobra/MAINTAINERS @@ -0,0 +1,13 @@ +maintainers: +- spf13 +- johnSchnake +- jpmcb +- marckhouzam +inactive: +- anthonyfok +- bep +- bogem +- broady +- eparis +- jharshman +- wfernandes diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index 472c73bf16f..443ef1a987a 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -9,11 +9,11 @@ ifeq (, $(shell which richgo)) $(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") endif -.PHONY: fmt lint test cobra_generator install_deps clean +.PHONY: fmt lint test install_deps clean default: all -all: fmt test cobra_generator +all: fmt test fmt: $(info ******************** checking formatting ********************) @@ -23,15 +23,10 @@ lint: $(info ******************** running lint tools ********************) golangci-lint run -v -test: install_deps lint +test: install_deps $(info ******************** running tests ********************) richgo test -v ./... -cobra_generator: install_deps - $(info ******************** building generator ********************) - mkdir -p $(BIN) - make -C cobra all - install_deps: $(info ******************** downloading dependencies ********************) go get -v ./... diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 074e3979f89..7adef143b42 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,52 +1,26 @@ ![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. +Cobra is a library for creating powerful modern CLI applications. Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), [Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. 
[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) +[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Usage](#usage) - * [Using the Cobra Generator](user_guide.md#using-the-cobra-generator) - * [Using the Cobra Library](user_guide.md#using-the-cobra-library) - * [Working with Flags](user_guide.md#working-with-flags) - * [Positional and Custom Arguments](user_guide.md#positional-and-custom-arguments) - * [Example](user_guide.md#example) - * [Help Command](user_guide.md#help-command) - * [Usage Message](user_guide.md#usage-message) - * [PreRun and PostRun Hooks](user_guide.md#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](user_guide.md#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](user_guide.md#generating-documentation-for-your-command) - * [Generating shell completions](user_guide.md#generating-shell-completions) -- [Contributing](CONTRIBUTING.md) -- [License](#license) - # Overview Cobra is a library providing a simple interface to create powerful modern CLI interfaces similar to git & go tools. -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - Cobra provides: * Easy subcommand-based CLIs: `app server`, `app fetch`, etc. * Fully POSIX-compliant flags (including short & long versions) * Nested subcommands * Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` * Intelligent suggestions (`app srver`... did you mean `app server`?) * Automatic help generation for commands and flags * Automatic help flag recognition of `-h`, `--help`, etc. @@ -54,7 +28,7 @@ Cobra provides: * Automatically generated man pages for your application * Command aliases so you can change things without breaking them * The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps +* Optional seamless integration with [viper](http://github.com/spf13/viper) for 12-factor apps # Concepts @@ -88,7 +62,7 @@ have children commands and optionally run an action. In the example above, 'server' is the command. -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) +[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) ## Flags @@ -105,10 +79,11 @@ which maintains the same interface while adding POSIX compliance. # Installing Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: +of the library. 
- go get -u github.com/spf13/cobra +``` +go get -u github.com/spf13/cobra@latest +``` Next, include Cobra in your application: @@ -117,8 +92,19 @@ import "github.com/spf13/cobra" ``` # Usage +`cobra-cli` is a command line program to generate cobra applications and command files. +It will bootstrap your application scaffolding to rapidly +develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application. + +It can be installed by running: + +``` +go install github.com/spf13/cobra-cli@latest +``` + +For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/master/README.md) -See [User Guide](user_guide.md). +For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 70e9b262912..20a022b3084 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -107,3 +107,15 @@ func RangeArgs(min int, max int) PositionalArgs { return nil } } + +// MatchAll allows combining several PositionalArgs to work in concert. +func MatchAll(pargs ...PositionalArgs) PositionalArgs { + return func(cmd *Command, args []string) error { + for _, parg := range pargs { + if err := parg(cmd, args); err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 733f4d1211d..6c360c595fd 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -24,7 +24,7 @@ func writePreamble(buf io.StringWriter, name string) { WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -134,7 +134,7 @@ __%[1]s_handle_go_custom_completion() $filteringCmd elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only - local subDir + local subdir # Use printf to strip any trailing newline subdir=$(printf "%%s" "${out[0]}") if [ -n "$subdir" ]; then @@ -187,13 +187,19 @@ __%[1]s_handle_reply() PREFIX="" cur="${cur#*=}" ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then + if [ -n "${ZSH_VERSION:-}" ]; then # zsh completion needs --flag= prefix eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" fi fi fi - return 0; + + if [[ -z "${flag_parsing_disabled}" ]]; then + # If flag parsing is enabled, we have completed the flags and can return. + # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough + # to possibly call handle_go_custom_completion. 
+ return 0; + fi ;; esac @@ -232,13 +238,13 @@ __%[1]s_handle_reply() fi if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi fi # available in bash-completion >= 2, not always present on macOS @@ -272,7 +278,7 @@ __%[1]s_handle_flag() # if a command required a flag, and we found it, unset must_have_one_flag() local flagname=${words[c]} - local flagvalue + local flagvalue="" # if the word contained an = if [[ ${words[c]} == *"="* ]]; then flagvalue=${flagname#*=} # take in as flagvalue after the = @@ -291,7 +297,7 @@ __%[1]s_handle_flag() # keep flag value with flagname as flaghash # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then if [ -n "${flagvalue}" ] ; then flaghash[${flagname}]=${flagvalue} elif [ -n "${words[ $((c+1)) ]}" ] ; then @@ -303,7 +309,7 @@ __%[1]s_handle_flag() # skip the argument to a two word flag if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -363,7 +369,7 @@ __%[1]s_handle_word() __%[1]s_handle_command elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then words[c]=${aliashash[${words[c]}]} __%[1]s_handle_command else @@ -394,6 +400,7 @@ func writePostscript(buf io.StringWriter, name string) { fi local c=0 + local flag_parsing_disabled= local flags=() local two_word_flags=() local local_nonpersistent_flags=() @@ -403,8 +410,8 @@ func writePostscript(buf io.StringWriter, name string) { local command_aliases=() local must_have_one_flag=() local must_have_one_noun=() - local has_completion_function - local last_command + local has_completion_function="" + local last_command="" local nouns=() local noun_aliases=() @@ -535,6 +542,11 @@ func writeFlags(buf io.StringWriter, cmd *Command) { flags_completion=() `) + + if cmd.DisableFlagParsing { + WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") + } + localNonPersistentFlags := cmd.LocalNonPersistentFlags() cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -609,7 +621,7 @@ func writeCmdAliases(buf io.StringWriter, cmd *Command) { sort.Strings(cmd.Aliases) - WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 8859b57c42c..82d26c17568 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -138,13 +138,42 @@ __%[1]s_process_completion_results() { _filedir -d fi else - __%[1]s_handle_standard_completion_case + __%[1]s_handle_completion_types fi __%[1]s_handle_special_char "$cur" : __%[1]s_handle_special_char "$cur" = } +__%[1]s_handle_completion_types() { + __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" + + case $COMP_TYPE in + 37|42) + # Type: menu-complete/menu-complete-backward and insert-completions + # If the user requested inserting one completion at a time, or all + # completions at once on the command-line we must remove the descriptions. 
+ # https://github.com/spf13/cobra/issues/1508 + local tab comp + tab=$(printf '\t') + while IFS='' read -r comp; do + # Strip any description + comp=${comp%%%%$tab*} + # Only consider the completions that match + comp=$(compgen -W "$comp" -- "$cur") + if [ -n "$comp" ]; then + COMPREPLY+=("$comp") + fi + done < <(printf "%%s\n" "${out[@]}") + ;; + + *) + # Type: complete (normal completion) + __%[1]s_handle_standard_completion_case + ;; + esac +} + __%[1]s_handle_standard_completion_case() { local tab comp tab=$(printf '\t') diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 6159c1cc19d..bb5dad90b7f 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package cobra diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 8768b1736dc..a84f5a82aab 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package cobra diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b849b9c8444..9ecd56a472a 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -93,6 +93,8 @@ type CompletionOptions struct { // DisableDescriptions turns off all completion descriptions for shells // that support them DisableDescriptions bool + // HiddenDefaultCmd makes the default 'completion' command hidden + HiddenDefaultCmd bool } // NoFileCompletions can be used to disable file completion for commands that should @@ -226,7 +228,17 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if c.Root().TraverseChildren { finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) } else { - finalCmd, finalArgs, err = c.Root().Find(trimmedArgs) + // For Root commands that don't specify any value for their Args fields, when we call + // Find(), if those Root commands don't have any sub-commands, they will accept arguments. + // However, because we have added the __complete sub-command in the current code path, the + // call to Find() -> legacyArgs() will return an error if there are any arguments. + // To avoid this, we first remove the __complete command to get back to having no sub-commands. + rootCmd := c.Root() + if len(rootCmd.Commands()) == 1 { + rootCmd.RemoveCommand(c) + } + + finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) } if err != nil { // Unable to find the real command. E.g., someInvalidCmd @@ -266,6 +278,12 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } } + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. 
+ if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + if flag != nil && flagCompletion { // Check if we are completing a flag value subject to annotations if validExts, present := flag.Annotations[BashCompFilenameExt]; present { @@ -290,12 +308,16 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } } + var completions []string + var directive ShellCompDirective + + // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; + // doing this allows for completion of persistant flag names even for commands that disable flag parsing. + // // When doing completion of a flag name, as soon as an argument starts with // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires // the flag name to be complete if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { - var completions []string - // First check for required flags completions = completeRequireFlags(finalCmd, toComplete) @@ -322,86 +344,86 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi }) } - directive := ShellCompDirectiveNoFileComp + directive = ShellCompDirectiveNoFileComp if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { // If there is a single completion, the shell usually adds a space // after the completion. We don't want that if the flag ends with an = directive = ShellCompDirectiveNoSpace } - return finalCmd, completions, directive, nil - } - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. - if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - var completions []string - directive := ShellCompDirectiveDefault - if flag == nil { - foundLocalNonPersistentFlag := false - // If TraverseChildren is true on the root command we don't check for - // local flags because we can use a local flag on a parent command - if !finalCmd.Root().TraverseChildren { - // Check if there are any local, non-persistent flags on the command-line - localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { - foundLocalNonPersistentFlag = true - } - }) + if !finalCmd.DisableFlagParsing { + // If DisableFlagParsing==false, we have completed the flags as known by Cobra; + // we can return what we found. + // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we + // let the logic continue to see if ValidArgsFunction needs to be called. 
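
The restructured `getCompletions` logic above keeps going after flag-name completion when `DisableFlagParsing` is set, so that `ValidArgsFunction` can still run. A minimal sketch of a command that relies on this behaviour, assuming a hypothetical `wrap` sub-command that is not part of this patch:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "app"}

	// Hypothetical sub-command that handles its own flag parsing.
	wrapCmd := &cobra.Command{
		Use:                "wrap",
		DisableFlagParsing: true, // flags are passed through to Run untouched
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// Per the change above, shell completion still reaches this
			// function even though Cobra did not parse the flags itself.
			return []string{"alpha", "beta"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	rootCmd.AddCommand(wrapCmd)
	_ = rootCmd.Execute()
}
```

As the comments in the hunk note, persistent flag names are still offered for such a command; only the stripping of flags from the positional arguments is skipped.
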
+ return finalCmd, completions, directive, nil } + } else { + directive = ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } - // Complete subcommand names, including the help command - if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { - // We only complete sub-commands if: - // - there are no arguments on the command-line and - // - there are no local, non-persistent flags on the command-line or TraverseChildren is true - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp } - directive = ShellCompDirectiveNoFileComp } } - } - // Complete required flags even without the '-' prefix - completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) - - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. - if len(finalCmd.ValidArgs) > 0 { - if len(finalArgs) == 0 { - // ValidArgs are only for the first argument - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) + + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } } - } - directive = ShellCompDirectiveNoFileComp - - // If no completions were found within commands or ValidArgs, - // see if there are any ArgAliases that should be completed. - if len(completions) == 0 { - for _, argAlias := range finalCmd.ArgAliases { - if strings.HasPrefix(argAlias, toComplete) { - completions = append(completions, argAlias) + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. 
+ if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } } } } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil } - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, directive, nil + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. } - - // Let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. } // Find the completion function for the flag or command @@ -589,39 +611,43 @@ func (c *Command) initDefaultCompletionCmd() { completionCmd := &Command{ Use: compCmdName, - Short: "generate the autocompletion script for the specified shell", - Long: fmt.Sprintf(` -Generate the autocompletion script for %[1]s for the specified shell. + Short: "Generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. See each sub-command's help for details on how to use the generated script. `, c.Root().Name()), Args: NoArgs, ValidArgsFunction: NoFileCompletions, + Hidden: c.CompletionOptions.HiddenDefaultCmd, } c.AddCommand(completionCmd) out := c.OutOrStdout() noDesc := c.CompletionOptions.DisableDescriptions - shortDesc := "generate the autocompletion script for %s" + shortDesc := "Generate the autocompletion script for %s" bash := &Command{ Use: "bash", Short: fmt.Sprintf(shortDesc, "bash"), - Long: fmt.Sprintf(` -Generate the autocompletion script for the bash shell. + Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. This script depends on the 'bash-completion' package. If it is not installed already, you can install it via your OS's package manager. To load completions in your current shell session: -$ source <(%[1]s completion bash) + + source <(%[1]s completion bash) To load completions for every new session, execute once: -Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s -MacOS: - $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s + +#### Linux: + + %[1]s completion bash > /etc/bash_completion.d/%[1]s + +#### macOS: + + %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s You will need to start a new shell for this setup to take effect. - `, c.Root().Name()), +`, c.Root().Name()), Args: NoArgs, DisableFlagsInUseLine: true, ValidArgsFunction: NoFileCompletions, @@ -636,19 +662,22 @@ You will need to start a new shell for this setup to take effect. zsh := &Command{ Use: "zsh", Short: fmt.Sprintf(shortDesc, "zsh"), - Long: fmt.Sprintf(` -Generate the autocompletion script for the zsh shell. + Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. If shell completion is not already enabled in your environment you will need to enable it. 
You can execute the following once: -$ echo "autoload -U compinit; compinit" >> ~/.zshrc + echo "autoload -U compinit; compinit" >> ~/.zshrc To load completions for every new session, execute once: -# Linux: -$ %[1]s completion zsh > "${fpath[1]}/_%[1]s" -# macOS: -$ %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s + +#### Linux: + + %[1]s completion zsh > "${fpath[1]}/_%[1]s" + +#### macOS: + + %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s You will need to start a new shell for this setup to take effect. `, c.Root().Name()), @@ -668,14 +697,15 @@ You will need to start a new shell for this setup to take effect. fish := &Command{ Use: "fish", Short: fmt.Sprintf(shortDesc, "fish"), - Long: fmt.Sprintf(` -Generate the autocompletion script for the fish shell. + Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. To load completions in your current shell session: -$ %[1]s completion fish | source + + %[1]s completion fish | source To load completions for every new session, execute once: -$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + + %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish You will need to start a new shell for this setup to take effect. `, c.Root().Name()), @@ -692,11 +722,11 @@ You will need to start a new shell for this setup to take effect. powershell := &Command{ Use: "powershell", Short: fmt.Sprintf(shortDesc, "powershell"), - Long: fmt.Sprintf(` -Generate the autocompletion script for powershell. + Long: fmt.Sprintf(`Generate the autocompletion script for powershell. To load completions in your current shell session: -PS C:\> %[1]s completion powershell | Out-String | Invoke-Expression + + %[1]s completion powershell | Out-String | Invoke-Expression To load completions for every new session, add the output of the above command to your powershell profile. diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 59234c09f18..62d719f0b33 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -50,7 +50,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { if ($Command.Length -gt $CursorPosition) { $Command=$Command.Substring(0,$CursorPosition) } - __%[1]s_debug "Truncated command: $Command" + __%[1]s_debug "Truncated command: $Command" $ShellCompDirectiveError=%[3]d $ShellCompDirectiveNoSpace=%[4]d @@ -58,7 +58,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { $ShellCompDirectiveFilterFileExt=%[6]d $ShellCompDirectiveFilterDirs=%[7]d - # Prepare the command to request completions for the program. + # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. $Program,$Arguments = $Command.Split(" ",2) $RequestComp="$Program %[2]s $Arguments" @@ -233,7 +233,7 @@ Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { Default { # Like MenuComplete but we don't want to add a space here because # the user need to press space anyway to get the completion. 
- # Description will not be shown because thats not possible with TabCompleteNext + # Description will not be shown because that's not possible with TabCompleteNext [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") } } diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index d98a71e36f9..9674c348c0d 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -4,6 +4,7 @@ - [Bleve](http://www.blevesearch.com/) - [CockroachDB](http://www.cockroachlabs.com/) - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) +- [Datree](https://github.com/datreeio/datree) - [Delve](https://github.com/derekparker/delve) - [Docker (distribution)](https://github.com/docker/distribution) - [Etcd](https://etcd.io/) @@ -14,25 +15,37 @@ - [GitHub Labeler](https://github.com/erdaltsksn/gh-label) - [Golangci-lint](https://golangci-lint.run) - [GopherJS](http://www.gopherjs.org/) +- [GoReleaser](https://goreleaser.com) - [Helm](https://helm.sh) - [Hugo](https://gohugo.io) +- [Infracost](https://github.com/infracost/infracost) - [Istio](https://istio.io) - [Kool](https://github.com/kool-dev/kool) - [Kubernetes](http://kubernetes.io/) - [Linkerd](https://linkerd.io/) - [Mattermost-server](https://github.com/mattermost/mattermost-server) +- [Mercure](https://mercure.rocks/) +- [Meroxa CLI](https://github.com/meroxa/cli) - [Metal Stack CLI](https://github.com/metal-stack/metalctl) - [Moby (former Docker)](https://github.com/moby/moby) +- [Moldy](https://github.com/Moldy-Community/moldy) +- [Multi-gitter](https://github.com/lindell/multi-gitter) - [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +- [nFPM](https://nfpm.goreleaser.com) - [OpenShift](https://www.openshift.com/) - [Ory Hydra](https://github.com/ory/hydra) - [Ory Kratos](https://github.com/ory/kratos) +- [Pixie](https://github.com/pixie-io/pixie) - [Pouch](https://github.com/alibaba/pouch) - [ProjectAtomic (enterprise)](http://www.projectatomic.io/) - [Prototool](https://github.com/uber/prototool) +- [QRcp](https://github.com/claudiodangelis/qrcp) - [Random](https://github.com/erdaltsksn/random) - [Rclone](https://rclone.org/) +- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) - [Twitch CLI](https://github.com/twitchdev/twitch-cli) +- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) - [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index 4ba06a11c0d..33a4c65a5a3 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -16,10 +16,12 @@ If you do not wish to use the default `completion` command, you can choose to provide your own, which will take precedence over the default one. (This also provides backwards-compatibility with programs that already have their own `completion` command.) 
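
This bump also introduces the `HiddenDefaultCmd` completion option (see the `CompletionOptions` change in `completions.go` above), which is another way to deal with the default `completion` command. A minimal sketch, assuming an illustrative `app` root command:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "app"}

	// Keep the built-in completion command functional but hide it from
	// help output, e.g. when the program documents its own wrapper.
	rootCmd.CompletionOptions.HiddenDefaultCmd = true

	_ = rootCmd.Execute()
}
```
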
-If you are using the generator, you can create a completion command by running +If you are using the `cobra-cli` generator, +which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), +you can create a completion command by running ```bash -cobra add completion +cobra-cli add completion ``` and then modifying the generated `cmd/completion.go` file to look something like this (writing the shell script to stdout allows the most flexible use): @@ -28,17 +30,17 @@ and then modifying the generated `cmd/completion.go` file to look something like var completionCmd = &cobra.Command{ Use: "completion [bash|zsh|fish|powershell]", Short: "Generate completion script", - Long: `To load completions: + Long: fmt.Sprintf(`To load completions: Bash: - $ source <(yourprogram completion bash) + $ source <(%[1]s completion bash) # To load completions for each session, execute once: # Linux: - $ yourprogram completion bash > /etc/bash_completion.d/yourprogram + $ %[1]s completion bash > /etc/bash_completion.d/%[1]s # macOS: - $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram + $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s Zsh: @@ -48,25 +50,25 @@ Zsh: $ echo "autoload -U compinit; compinit" >> ~/.zshrc # To load completions for each session, execute once: - $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" + $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" # You will need to start a new shell for this setup to take effect. fish: - $ yourprogram completion fish | source + $ %[1]s completion fish | source # To load completions for each session, execute once: - $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish + $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish PowerShell: - PS> yourprogram completion powershell | Out-String | Invoke-Expression + PS> %[1]s completion powershell | Out-String | Invoke-Expression # To load completions for every new session, run: - PS> yourprogram completion powershell > yourprogram.ps1 + PS> %[1]s completion powershell > %[1]s.ps1 # and source this file from your PowerShell profile. -`, +`,cmd.Root().Name()), DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, Args: cobra.ExactValidArgs(1), diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md index 311abce284d..4a3c2b0da53 100644 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -29,10 +29,10 @@ func main() { ## Using the Cobra Generator -Cobra provides its own program that will create your application and add any +Cobra-CLI is its own program that will create your application and add any commands you want. It's the easiest way to incorporate Cobra into your application. -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. +For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/master/README.md) ## Using the Cobra Library @@ -86,7 +86,7 @@ var ( userLicense string rootCmd = &cobra.Command{ - Use: "cobra", + Use: "cobra-cli", Short: "A generator for Cobra based Applications", Long: `Cobra is a CLI library for Go that empowers applications. This application is a tool to generate the needed files @@ -281,7 +281,7 @@ func init() { In this example, the persistent flag `author` is bound with `viper`. 
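
A minimal sketch of that binding, mirroring the pattern the user guide describes; the `app` command name is illustrative and `viper` is assumed to be available alongside Cobra:

```go
package main

import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	author  string
	rootCmd = &cobra.Command{Use: "app"}
)

func init() {
	rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "author name for copyright attribution")
	// Expose the flag's value under the "author" viper key.
	_ = viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
}

func main() { _ = rootCmd.Execute() }
```
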
**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. +when the `--author` flag is provided by user. More in [viper documentation](https://github.com/spf13/viper#working-with-flags). @@ -315,6 +315,7 @@ The following validators are built in: - `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. - `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` - `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. +- `MatchAll(pargs ...PositionalArgs)` - enables combining existing checks with arbitrary other checks (e.g. you want to check the ExactArgs length along with other qualities). An example of setting the custom validator: diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 1afec30ea92..624adab537b 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -202,7 +202,7 @@ _%[1]s() _arguments '*:filename:'"$filteringCmd" elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then # File completion for directories only - local subDir + local subdir subdir="${completions[1]}" if [ -n "$subdir" ]; then __%[1]s_debug "Listing directories in $subdir" @@ -250,7 +250,7 @@ _%[1]s() # don't run the completion function when being source-ed or eval-ed if [ "$funcstack[1]" = "_%[1]s" ]; then - _%[1]s + _%[1]s fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d26792..3bb22a9718e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -3,6 +3,7 @@ package assert import ( "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +31,8 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! 
+ timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } } return compareEqual, false @@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
} // Negative asserts that the specified element is negative @@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 00000000000..df22c47fc50 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatability +// reasons. +func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 00000000000..1701af2a3c8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. +func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229a86..27e2420ed2e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. 
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f07e..d9ea368d0a3 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b47182a7..75944878358 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401f57..0357b2231a2 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) 
} subsetValue := reflect.ValueOf(subset) @@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) 
+ return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md new file mode 100644 index 00000000000..dea3e409e11 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md @@ -0,0 +1,29 @@ +# LICENSE + +Copyright (c) 2018-2022, Sylabs Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go new file mode 100644 index 00000000000..d7acbb69447 --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go @@ -0,0 +1,69 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +var ( + hdrArchUnknown archType = [...]byte{'0', '0', '\x00'} + hdrArch386 archType = [...]byte{'0', '1', '\x00'} + hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'} + hdrArchARM archType = [...]byte{'0', '3', '\x00'} + hdrArchARM64 archType = [...]byte{'0', '4', '\x00'} + hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'} + hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'} + hdrArchMIPS archType = [...]byte{'0', '7', '\x00'} + hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'} + hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'} + hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'} + hdrArchS390x archType = [...]byte{'1', '1', '\x00'} +) + +type archType [3]byte + +// getSIFArch returns the archType corresponding to go runtime arch. +func getSIFArch(arch string) archType { + archMap := map[string]archType{ + "386": hdrArch386, + "amd64": hdrArchAMD64, + "arm": hdrArchARM, + "arm64": hdrArchARM64, + "ppc64": hdrArchPPC64, + "ppc64le": hdrArchPPC64le, + "mips": hdrArchMIPS, + "mipsle": hdrArchMIPSle, + "mips64": hdrArchMIPS64, + "mips64le": hdrArchMIPS64le, + "s390x": hdrArchS390x, + } + + t, ok := archMap[arch] + if !ok { + return hdrArchUnknown + } + return t +} + +// GoArch returns the go runtime arch corresponding to t. +func (t archType) GoArch() string { + archMap := map[archType]string{ + hdrArch386: "386", + hdrArchAMD64: "amd64", + hdrArchARM: "arm", + hdrArchARM64: "arm64", + hdrArchPPC64: "ppc64", + hdrArchPPC64le: "ppc64le", + hdrArchMIPS: "mips", + hdrArchMIPSle: "mipsle", + hdrArchMIPS64: "mips64", + hdrArchMIPS64le: "mips64le", + hdrArchS390x: "s390x", + } + + arch, ok := archMap[t] + if !ok { + arch = "unknown" + } + return arch +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go new file mode 100644 index 00000000000..d706fb1a59c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go @@ -0,0 +1,103 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "io" +) + +// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The +// zero value for Buffer is an empty buffer ready to use. 
+type Buffer struct { + buf []byte + pos int64 +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial contents. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +var errNegativeOffset = errors.New("negative offset") + +// ReadAt implements the io.ReaderAt interface. +func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errNegativeOffset + } + + if off >= int64(len(b.buf)) { + return 0, io.EOF + } + + n = copy(p, b.buf[off:]) + if n < len(p) { + err = io.EOF + } + return n, err +} + +var errNegativePosition = errors.New("negative position") + +// Write implements the io.Writer interface. +func (b *Buffer) Write(p []byte) (n int, err error) { + if b.pos < 0 { + return 0, errNegativePosition + } + + if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need { + b.buf = append(b.buf, make([]byte, need-have)...) + } + + n = copy(b.buf[b.pos:], p) + b.pos += int64(n) + return n, nil +} + +var errInvalidWhence = errors.New("invalid whence") + +// Seek implements the io.Seeker interface. +func (b *Buffer) Seek(offset int64, whence int) (int64, error) { + var abs int64 + + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = b.pos + offset + case io.SeekEnd: + abs = int64(len(b.buf)) + offset + default: + return 0, errInvalidWhence + } + + if abs < 0 { + return 0, errNegativePosition + } + + b.pos = abs + return abs, nil +} + +var errTruncateRange = errors.New("truncation out of range") + +// Truncate discards all but the first n bytes from the buffer. +func (b *Buffer) Truncate(n int64) error { + if n < 0 || n > int64(len(b.buf)) { + return errTruncateRange + } + + b.buf = b.buf[:n] + return nil +} + +// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer +// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate). +func (b *Buffer) Bytes() []byte { return b.buf } + +// Len returns the number of bytes in the buffer. +func (b *Buffer) Len() int64 { return int64(len(b.buf)) } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go new file mode 100644 index 00000000000..e65bdb7476d --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go @@ -0,0 +1,680 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/google/uuid" +) + +// nextAligned finds the next offset that satisfies alignment. +func nextAligned(offset int64, alignment int) int64 { + align64 := uint64(alignment) + offset64 := uint64(offset) + + if align64 != 0 && offset64%align64 != 0 { + offset64 = (offset64 & ^(align64 - 1)) + align64 + } + + return int64(offset64) +} + +// writeDataObjectAt writes the data object described by di to ws, using time t, recording details +// in d. The object is written at the first position that satisfies the alignment requirements +// described by di following offsetUnaligned. 
+func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll + offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart) + if err != nil { + return err + } + + n, err := io.Copy(ws, di.r) + if err != nil { + return err + } + + if err := di.fillDescriptor(t, d); err != nil { + return err + } + d.Used = true + d.Offset = offset + d.Size = n + d.SizeWithPadding = offset - offsetUnaligned + n + + return nil +} + +var ( + errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image") + errPrimaryPartition = errors.New("image already contains a primary partition") +) + +// writeDataObject writes the data object described by di to f, using time t, recording details in +// the descriptor at index i. +func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error { + if i >= len(f.rds) { + return errInsufficientCapacity + } + + // If this is a primary partition, verify there isn't another primary partition, and update the + // architecture in the global header. + if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys { + if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 { + return errPrimaryPartition + } + + f.h.Arch = p.Arch + } + + d := &f.rds[i] + d.ID = uint32(i) + 1 + + if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil { + return err + } + + // Update minimum object ID map. + if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID { + f.minIDs[d.GroupID] = d.ID + } + + f.h.DescriptorsFree-- + f.h.DataSize += d.SizeWithPadding + + return nil +} + +// writeDescriptors writes the descriptors in f to backing storage. +func (f *FileImage) writeDescriptors() error { + if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.rds) +} + +// writeHeader writes the the global header in f to backing storage. +func (f *FileImage) writeHeader() error { + if _, err := f.rw.Seek(0, io.SeekStart); err != nil { + return err + } + + return binary.Write(f.rw, binary.LittleEndian, f.h) +} + +// createOpts accumulates container creation options. +type createOpts struct { + launchScript [hdrLaunchLen]byte + id uuid.UUID + descriptorsOffset int64 + descriptorCapacity int64 + dis []DescriptorInput + t time.Time + closeOnUnload bool +} + +// CreateOpt are used to specify container creation options. +type CreateOpt func(*createOpts) error + +var errLaunchScriptLen = errors.New("launch script too large") + +// OptCreateWithLaunchScript specifies s as the launch script. +func OptCreateWithLaunchScript(s string) CreateOpt { + return func(co *createOpts) error { + b := []byte(s) + + if len(b) >= len(co.launchScript) { + return errLaunchScriptLen + } + + copy(co.launchScript[:], b) + + return nil + } +} + +// OptCreateDeterministic sets header/descriptor fields to values that support deterministic +// creation of images. +func OptCreateDeterministic() CreateOpt { + return func(co *createOpts) error { + co.id = uuid.Nil + co.t = time.Time{} + return nil + } +} + +// OptCreateWithID specifies id as the unique ID. +func OptCreateWithID(id string) CreateOpt { + return func(co *createOpts) error { + id, err := uuid.Parse(id) + co.id = id + return err + } +} + +// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a +// maximum of n descriptors. 
+func OptCreateWithDescriptorCapacity(n int64) CreateOpt { + return func(co *createOpts) error { + co.descriptorCapacity = n + return nil + } +} + +// OptCreateWithDescriptors appends dis to the list of descriptors. +func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt { + return func(co *createOpts) error { + co.dis = append(co.dis, dis...) + return nil + } +} + +// OptCreateWithTime specifies t as the image creation time. +func OptCreateWithTime(t time.Time) CreateOpt { + return func(co *createOpts) error { + co.t = t + return nil + } +} + +// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptCreateWithCloseOnUnload(b bool) CreateOpt { + return func(co *createOpts) error { + co.closeOnUnload = b + return nil + } +} + +// createContainer creates a new SIF container file in rw, according to opts. +func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) { + rds := make([]rawDescriptor, co.descriptorCapacity) + rdsSize := int64(binary.Size(rds)) + + h := header{ + LaunchScript: co.launchScript, + Magic: hdrMagic, + Version: CurrentVersion.bytes(), + Arch: hdrArchUnknown, + ID: co.id, + CreatedAt: co.t.Unix(), + ModifiedAt: co.t.Unix(), + DescriptorsFree: co.descriptorCapacity, + DescriptorsTotal: co.descriptorCapacity, + DescriptorsOffset: co.descriptorsOffset, + DescriptorsSize: rdsSize, + DataOffset: co.descriptorsOffset + rdsSize, + } + + f := &FileImage{ + rw: rw, + h: h, + rds: rds, + minIDs: make(map[uint32]uint32), + } + + for i, di := range co.dis { + if err := f.writeDataObject(i, di, co.t); err != nil { + return nil, err + } + } + + if err := f.writeDescriptors(); err != nil { + return nil, err + } + + if err := f.writeHeader(); err != nil { + return nil, err + } + + return f, nil +} + +// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects +// can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptCreateWithCloseOnUnload. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) { + id, err := uuid.NewRandom() + if err != nil { + return nil, err + } + + co := createOpts{ + id: id, + descriptorsOffset: 4096, + descriptorCapacity: 48, + t: time.Now(), + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&co); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := createContainer(rw, co) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = co.closeOnUnload + return f, nil +} + +// CreateContainerAtPath creates a new SIF container file at path, according to opts. 
One or more +// data objects can optionally be specified using OptCreateWithDescriptors. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the image ID is set to a randomly generated value. To override this, consider using +// OptCreateDeterministic or OptCreateWithID. +// +// By default, the image creation time is set to time.Now(). To override this, consider using +// OptCreateDeterministic or OptCreateWithTime. +// +// By default, the image will support a maximum of 48 descriptors. To change this, consider using +// OptCreateWithDescriptorCapacity. +// +// A launch script can optionally be set using OptCreateWithLaunchScript. +func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) { + fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := CreateContainer(fp, opts...) + if err != nil { + fp.Close() + os.Remove(fp.Name()) + + return nil, err + } + + f.closeOnUnload = true + return f, nil +} + +func zeroData(fimg *FileImage, descr *rawDescriptor) error { + // first, move to data object offset + if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil { + return err + } + + var zero [4096]byte + n := descr.Size + upbound := int64(4096) + for { + if n < 4096 { + upbound = n + } + + if _, err := fimg.rw.Write(zero[:upbound]); err != nil { + return err + } + n -= 4096 + if n <= 0 { + break + } + } + + return nil +} + +func resetDescriptor(fimg *FileImage, index int) error { + // If we remove the primary partition, set the global header Arch field to HdrArchUnknown + // to indicate that the SIF file doesn't include a primary partition and no dependency + // on any architecture exists. + if fimg.rds[index].isPartitionOfType(PartPrimSys) { + fimg.h.Arch = hdrArchUnknown + } + + offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0])) + + // first, move to descriptor offset + if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil { + return err + } + + var emptyDesc rawDescriptor + return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc) +} + +// addOpts accumulates object add options. +type addOpts struct { + t time.Time +} + +// AddOpt are used to specify object add options. +type AddOpt func(*addOpts) error + +// OptAddDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptAddDeterministic() AddOpt { + return func(ao *addOpts) error { + ao.t = time.Time{} + return nil + } +} + +// OptAddWithTime specifies t as the image modification time. +func OptAddWithTime(t time.Time) AddOpt { + return func(ao *addOpts) error { + ao.t = t + return nil + } +} + +// AddObject adds a new data object and its descriptor into the specified SIF file. +// +// By default, the image modification time is set to the current time. To override this, consider +// using OptAddDeterministic or OptAddWithTime. +func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error { + ao := addOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&ao); err != nil { + return fmt.Errorf("%w", err) + } + } + + // Find an unused descriptor. 
+ i := 0 + for _, rd := range f.rds { + if !rd.Used { + break + } + i++ + } + + if err := f.writeDataObject(i, di, ao.t); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = ao.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// isLast return true if the data object associated with d is the last in f. +func (f *FileImage) isLast(d *rawDescriptor) bool { + isLast := true + + end := d.Offset + d.Size + f.WithDescriptors(func(d Descriptor) bool { + isLast = d.Offset()+d.Size() <= end + return !isLast + }) + + return isLast +} + +// truncateAt truncates f at the start of the padded data object described by d. +func (f *FileImage) truncateAt(d *rawDescriptor) error { + start := d.Offset + d.Size - d.SizeWithPadding + + if err := f.rw.Truncate(start); err != nil { + return err + } + + return nil +} + +// deleteOpts accumulates object deletion options. +type deleteOpts struct { + zero bool + compact bool + t time.Time +} + +// DeleteOpt are used to specify object deletion options. +type DeleteOpt func(*deleteOpts) error + +// OptDeleteZero specifies whether the deleted object should be zeroed. +func OptDeleteZero(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.zero = b + return nil + } +} + +// OptDeleteCompact specifies whether the image should be compacted following object deletion. +func OptDeleteCompact(b bool) DeleteOpt { + return func(do *deleteOpts) error { + do.compact = b + return nil + } +} + +// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptDeleteDeterministic() DeleteOpt { + return func(do *deleteOpts) error { + do.t = time.Time{} + return nil + } +} + +// OptDeleteWithTime specifies t as the image modification time. +func OptDeleteWithTime(t time.Time) DeleteOpt { + return func(do *deleteOpts) error { + do.t = t + return nil + } +} + +var errCompactNotImplemented = errors.New("compact not implemented for non-last object") + +// DeleteObject deletes the data object with id, according to opts. +// +// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following +// object deletion, use OptDeleteCompact. +// +// By default, the image modification time is set to time.Now(). To override this, consider using +// OptDeleteDeterministic or OptDeleteWithTime. +func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error { + do := deleteOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&do); err != nil { + return fmt.Errorf("%w", err) + } + } + + d, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if do.compact && !f.isLast(d) { + return fmt.Errorf("%w", errCompactNotImplemented) + } + + if do.zero { + if err := zeroData(f, d); err != nil { + return fmt.Errorf("%w", err) + } + } + + if do.compact { + if err := f.truncateAt(d); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.DataSize -= d.SizeWithPadding + } + + f.h.DescriptorsFree++ + f.h.ModifiedAt = do.t.Unix() + + index := 0 + for i, od := range f.rds { + if od.ID == id { + index = i + break + } + } + + if err := resetDescriptor(f, index); err != nil { + return fmt.Errorf("%w", err) + } + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} + +// setOpts accumulates object set options. 
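Taken together, CreateContainerAtPath, AddObject and DeleteObject above form a small create/modify workflow. A minimal sketch of how this API might be used; the file name, object contents and option choices are illustrative only:

```go
package main

import (
	"log"
	"strings"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Create a new image with a single JSON data object in the default group.
	di, err := sif.NewDescriptorInput(sif.DataGenericJSON, strings.NewReader(`{"foo":"bar"}`),
		sif.OptObjectName("labels.json"),
	)
	if err != nil {
		log.Fatal(err)
	}

	f, err := sif.CreateContainerAtPath("example.sif", sif.OptCreateWithDescriptors(di))
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	// Add a second object after creation; the image modification time is updated.
	di2, err := sif.NewDescriptorInput(sif.DataGeneric, strings.NewReader("hello"),
		sif.OptObjectName("note.txt"),
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := f.AddObject(di2); err != nil {
		log.Fatal(err)
	}

	// Delete the first object (ID 1), zeroing its data region.
	if err := f.DeleteObject(1, sif.OptDeleteZero(true)); err != nil {
		log.Fatal(err)
	}
}
```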
+type setOpts struct { + t time.Time +} + +// SetOpt are used to specify object set options. +type SetOpt func(*setOpts) error + +// OptSetDeterministic sets header/descriptor fields to values that support deterministic +// modification of images. +func OptSetDeterministic() SetOpt { + return func(so *setOpts) error { + so.t = time.Time{} + return nil + } +} + +// OptSetWithTime specifies t as the image/object modification time. +func OptSetWithTime(t time.Time) SetOpt { + return func(so *setOpts) error { + so.t = t + return nil + } +} + +var ( + errNotPartition = errors.New("data object not a partition") + errNotSystem = errors.New("data object not a system partition") +) + +// SetPrimPart sets the specified system partition to be the primary one. +// +// By default, the image/object modification times are set to time.Now(). To override this, +// consider using OptSetDeterministic or OptSetWithTime. +func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { + so := setOpts{ + t: time.Now(), + } + + for _, opt := range opts { + if err := opt(&so); err != nil { + return fmt.Errorf("%w", err) + } + } + + descr, err := f.getDescriptor(WithID(id)) + if err != nil { + return fmt.Errorf("%w", err) + } + + if descr.DataType != DataPartition { + return fmt.Errorf("%w", errNotPartition) + } + + fs, pt, arch, err := descr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + // if already primary system partition, nothing to do + if pt == PartPrimSys { + return nil + } + + if pt != PartSystem { + return fmt.Errorf("%w", errNotSystem) + } + + olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) + if err != nil && !errors.Is(err, ErrObjectNotFound) { + return fmt.Errorf("%w", err) + } + + f.h.Arch = getSIFArch(arch) + + extra := partition{ + Fstype: fs, + Parttype: PartPrimSys, + } + copy(extra.Arch[:], arch) + + if err := descr.setExtra(extra); err != nil { + return fmt.Errorf("%w", err) + } + + if olddescr != nil { + oldfs, _, oldarch, err := olddescr.getPartitionMetadata() + if err != nil { + return fmt.Errorf("%w", err) + } + + oldextra := partition{ + Fstype: oldfs, + Parttype: PartSystem, + Arch: getSIFArch(oldarch), + } + + if err := olddescr.setExtra(oldextra); err != nil { + return fmt.Errorf("%w", err) + } + } + + if err := f.writeDescriptors(); err != nil { + return fmt.Errorf("%w", err) + } + + f.h.ModifiedAt = so.t.Unix() + + if err := f.writeHeader(); err != nil { + return fmt.Errorf("%w", err) + } + + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go new file mode 100644 index 00000000000..da7a6a7c7be --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -0,0 +1,267 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "bytes" + "crypto" + "encoding/binary" + "errors" + "fmt" + "io" + "strings" + "time" +) + +// rawDescriptor represents an on-disk object descriptor. 
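A short sketch of promoting an existing system partition with SetPrimPart; LoadContainerFromPath, GetDescriptor and WithPartitionType belong to the same package (defined in load.go and select.go further below), and the path is illustrative:

```go
package main

import (
	"log"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	f, err := sif.LoadContainerFromPath("example.sif")
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	// Find a (non-primary) system partition and make it the primary one.
	d, err := f.GetDescriptor(sif.WithPartitionType(sif.PartSystem))
	if err != nil {
		log.Fatal(err)
	}

	if err := f.SetPrimPart(d.ID()); err != nil {
		log.Fatal(err)
	}
}
```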
+type rawDescriptor struct { + DataType DataType + Used bool + ID uint32 + GroupID uint32 + LinkedID uint32 + Offset int64 + Size int64 + SizeWithPadding int64 + + CreatedAt int64 + ModifiedAt int64 + UID int64 // Deprecated: UID exists for historical compatibility and should not be used. + GID int64 // Deprecated: GID exists for historical compatibility and should not be used. + Name [descrNameLen]byte + Extra [descrMaxPrivLen]byte +} + +// partition represents the SIF partition data object descriptor. +type partition struct { + Fstype FSType + Parttype PartType + Arch archType +} + +// signature represents the SIF signature data object descriptor. +type signature struct { + Hashtype hashType + Entity [descrEntityLen]byte +} + +// cryptoMessage represents the SIF crypto message object descriptor. +type cryptoMessage struct { + Formattype FormatType + Messagetype MessageType +} + +var errNameTooLarge = errors.New("name value too large") + +// setName encodes name into the name field of d. +func (d *rawDescriptor) setName(name string) error { + if len(name) > len(d.Name) { + return errNameTooLarge + } + + for i := copy(d.Name[:], name); i < len(d.Name); i++ { + d.Name[i] = 0 + } + + return nil +} + +var errExtraTooLarge = errors.New("extra value too large") + +// setExtra encodes v into the extra field of d. +func (d *rawDescriptor) setExtra(v interface{}) error { + if v == nil { + return nil + } + + if binary.Size(v) > len(d.Extra) { + return errExtraTooLarge + } + + b := new(bytes.Buffer) + if err := binary.Write(b, binary.LittleEndian, v); err != nil { + return err + } + + for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ { + d.Extra[i] = 0 + } + + return nil +} + +// getPartitionMetadata gets metadata for a partition data object. +func (d rawDescriptor) getPartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + if got, want := d.DataType, DataPartition; got != want { + return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}} + } + + var p partition + + b := bytes.NewReader(d.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &p); err != nil { + return 0, 0, "", fmt.Errorf("%w", err) + } + + return p.Fstype, p.Parttype, p.Arch.GoArch(), nil +} + +// isPartitionOfType returns true if d is a partition data object of type pt. +func (d rawDescriptor) isPartitionOfType(pt PartType) bool { + _, t, _, err := d.getPartitionMetadata() + if err != nil { + return false + } + return t == pt +} + +// Descriptor represents the SIF descriptor type. +type Descriptor struct { + r io.ReaderAt // Backing storage. + + raw rawDescriptor // Raw descriptor from image. + + relativeID uint32 // ID relative to minimum ID of object group. +} + +// DataType returns the type of data object. +func (d Descriptor) DataType() DataType { return d.raw.DataType } + +// ID returns the data object ID of d. +func (d Descriptor) ID() uint32 { return d.raw.ID } + +// GroupID returns the data object group ID of d, or zero if d is not part of a data object +// group. +func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask } + +// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked +// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a +// data object ID. +func (d Descriptor) LinkedID() (id uint32, isGroup bool) { + return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask +} + +// Offset returns the offset of the data object. 
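The Extra field round-trip performed by setExtra and getPartitionMetadata is plain encoding/binary serialization into a fixed-size region. A sketch of the same mechanism using stand-in types, since partition, archType and descrMaxPrivLen are unexported; the field widths are illustrative, not the package's actual layout:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// partitionLike is a stand-in for the unexported partition struct.
type partitionLike struct {
	Fstype   int32
	Parttype int32
	Arch     [2]byte
}

const extraLen = 384 // stand-in for descrMaxPrivLen

func main() {
	var extra [extraLen]byte

	// Encode (what setExtra does): little-endian into the fixed Extra field.
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.LittleEndian, partitionLike{Fstype: 1, Parttype: 2, Arch: [2]byte{'0', '2'}}); err != nil {
		panic(err)
	}
	copy(extra[:], buf.Bytes())

	// Decode (what getPartitionMetadata does): read the struct back out.
	var p partitionLike
	if err := binary.Read(bytes.NewReader(extra[:]), binary.LittleEndian, &p); err != nil {
		panic(err)
	}
	fmt.Printf("fs=%d part=%d arch=%s\n", p.Fstype, p.Parttype, p.Arch[:])
}
```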
+func (d Descriptor) Offset() int64 { return d.raw.Offset } + +// Size returns the data object size. +func (d Descriptor) Size() int64 { return d.raw.Size } + +// CreatedAt returns the creation time of the data object. +func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) } + +// ModifiedAt returns the modification time of the data object. +func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) } + +// Name returns the name of the data object. +func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") } + +// PartitionMetadata gets metadata for a partition data object. +func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) { + return d.raw.getPartitionMetadata() +} + +var errHashUnsupported = errors.New("hash algorithm unsupported") + +// getHashType converts ht into a crypto.Hash. +func getHashType(ht hashType) (crypto.Hash, error) { + switch ht { + case hashSHA256: + return crypto.SHA256, nil + case hashSHA384: + return crypto.SHA384, nil + case hashSHA512: + return crypto.SHA512, nil + case hashBLAKE2S: + return crypto.BLAKE2s_256, nil + case hashBLAKE2B: + return crypto.BLAKE2b_256, nil + } + return 0, errHashUnsupported +} + +// SignatureMetadata gets metadata for a signature data object. +func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) { + if got, want := d.raw.DataType, DataSignature; got != want { + return ht, fp, &unexpectedDataTypeError{got, []DataType{want}} + } + + var s signature + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &s); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + if ht, err = getHashType(s.Hashtype); err != nil { + return ht, fp, fmt.Errorf("%w", err) + } + + fp = make([]byte, 20) + copy(fp, s.Entity[:]) + + return ht, fp, nil +} + +// CryptoMessageMetadata gets metadata for a crypto message data object. +func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) { + if got, want := d.raw.DataType, DataCryptoMessage; got != want { + return 0, 0, &unexpectedDataTypeError{got, []DataType{want}} + } + + var m cryptoMessage + + b := bytes.NewReader(d.raw.Extra[:]) + if err := binary.Read(b, binary.LittleEndian, &m); err != nil { + return 0, 0, fmt.Errorf("%w", err) + } + + return m.Formattype, m.Messagetype, nil +} + +// GetData returns the data object associated with descriptor d. +func (d Descriptor) GetData() ([]byte, error) { + b := make([]byte, d.raw.Size) + if _, err := io.ReadFull(d.GetReader(), b); err != nil { + return nil, err + } + return b, nil +} + +// GetReader returns a io.Reader that reads the data object associated with descriptor d. +func (d Descriptor) GetReader() io.Reader { + return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size) +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d. +func (d Descriptor) GetIntegrityReader() io.Reader { + fields := []interface{}{ + d.raw.DataType, + d.raw.Used, + d.relativeID, + d.raw.LinkedID, + d.raw.Size, + d.raw.CreatedAt, + d.raw.UID, + d.raw.GID, + } + + // Encode endian-sensitive fields. + data := bytes.Buffer{} + for _, f := range fields { + if err := binary.Write(&data, binary.LittleEndian, f); err != nil { + panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error. 
+ } + } + + return io.MultiReader( + &data, + bytes.NewReader(d.raw.Name[:]), + bytes.NewReader(d.raw.Extra[:]), + ) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go new file mode 100644 index 00000000000..c55cf51f9de --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -0,0 +1,300 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "crypto" + "errors" + "fmt" + "io" + "os" + "time" +) + +// descriptorOpts accumulates data object options. +type descriptorOpts struct { + groupID uint32 + linkID uint32 + alignment int + name string + extra interface{} + t time.Time +} + +// DescriptorInputOpt are used to specify data object options. +type DescriptorInputOpt func(DataType, *descriptorOpts) error + +// OptNoGroup specifies the data object is not contained within a data object group. +func OptNoGroup() DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.groupID = 0 + return nil + } +} + +// OptGroupID specifies groupID as data object group ID. +func OptGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.groupID = groupID + return nil + } +} + +// OptLinkedID specifies that the data object is linked to the data object with the specified ID. +func OptLinkedID(id uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if id == 0 { + return ErrInvalidObjectID + } + opts.linkID = id + return nil + } +} + +// OptLinkedGroupID specifies that the data object is linked to the data object group with the +// specified groupID. +func OptLinkedGroupID(groupID uint32) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + if groupID == 0 { + return ErrInvalidGroupID + } + opts.linkID = groupID | descrGroupMask + return nil + } +} + +// OptObjectAlignment specifies n as the data alignment requirement. +func OptObjectAlignment(n int) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.alignment = n + return nil + } +} + +// OptObjectName specifies name as the data object name. +func OptObjectName(name string) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.name = name + return nil + } +} + +// OptObjectTime specifies t as the data object creation time. +func OptObjectTime(t time.Time) DescriptorInputOpt { + return func(_ DataType, opts *descriptorOpts) error { + opts.t = t + return nil + } +} + +type unexpectedDataTypeError struct { + got DataType + want []DataType +} + +func (e *unexpectedDataTypeError) Error() string { + return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want) +} + +func (e *unexpectedDataTypeError) Is(target error) bool { + //nolint:errorlint // don't compare wrapped errors in Is() + t, ok := target.(*unexpectedDataTypeError) + if !ok { + return false + } + + if len(t.want) > 0 { + // Use a map to check that the "want" errors in e and t contain the same values, ignoring + // any ordering differences. + acc := make(map[DataType]int, len(t.want)) + + // Increment counter for each data type in e. 
+ for _, dt := range e.want { + if _, ok := acc[dt]; !ok { + acc[dt] = 0 + } + acc[dt]++ + } + + // Decrement counter for each data type in e. + for _, dt := range t.want { + if _, ok := acc[dt]; !ok { + return false + } + acc[dt]-- + } + + // If the "want" errors in e and t are equivalent, all counters should be zero. + for _, n := range acc { + if n != 0 { + return false + } + } + } + + return (e.got == t.got || t.got == 0) +} + +// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set +// to ft, and the message type is set to mt. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataCryptoMessage; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + m := cryptoMessage{ + Formattype: ft, + Messagetype: mt, + } + + opts.extra = m + return nil + } +} + +var errUnknownArchitcture = errors.New("unknown architecture") + +// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to +// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch +// should be the architecture as represented by the Go runtime. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataPartition; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + sifarch := getSIFArch(arch) + if sifarch == hdrArchUnknown { + return fmt.Errorf("%w: %v", errUnknownArchitcture, arch) + } + + p := partition{ + Fstype: fs, + Parttype: pt, + Arch: sifarch, + } + + opts.extra = p + return nil + } +} + +// sifHashType converts h into a HashType. +func sifHashType(h crypto.Hash) hashType { + switch h { + case crypto.SHA256: + return hashSHA256 + case crypto.SHA384: + return hashSHA384 + case crypto.SHA512: + return hashSHA512 + case crypto.BLAKE2s_256: + return hashBLAKE2S + case crypto.BLAKE2b_256: + return hashBLAKE2B + } + return 0 +} + +// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and +// the signing entity fingerprint is set to fp. +// +// If this option is applied to a data object with an incompatible type, an error is returned. +func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt { + return func(t DataType, opts *descriptorOpts) error { + if got, want := t, DataSignature; got != want { + return &unexpectedDataTypeError{got, []DataType{want}} + } + + s := signature{ + Hashtype: sifHashType(ht), + } + copy(s.Entity[:], fp) + + opts.extra = s + return nil + } +} + +// DescriptorInput describes a new data object. +type DescriptorInput struct { + dt DataType + r io.Reader + opts descriptorOpts +} + +// DefaultObjectGroup is the default group that data objects are placed in. +const DefaultObjectGroup = 1 + +// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents +// read from r, configured according to opts. +// +// It is possible (and often necessary) to store additional metadata related to certain types of +// data objects. 
Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata, +// and OptSignatureMetadata for this purpose. +// +// By default, the data object will be placed in the default data object group (1). To override +// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or +// OptLinkedGroupID. +// +// By default, the data object will be aligned according to the system's memory page size. To +// override this behavior, consider using OptObjectAlignment. +// +// By default, no name is set for data object. To set a name, use OptObjectName. +// +// When creating a new image, data object creation/modification times are set to the image creation +// time. When modifying an existing image, the data object creation/modification time is set to the +// image modification time. To override this behavior, consider using OptObjectTime. +func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) { + dopts := descriptorOpts{ + groupID: DefaultObjectGroup, + alignment: os.Getpagesize(), + } + + for _, opt := range opts { + if err := opt(t, &dopts); err != nil { + return DescriptorInput{}, fmt.Errorf("%w", err) + } + } + + di := DescriptorInput{ + dt: t, + r: r, + opts: dopts, + } + + return di, nil +} + +// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t. +func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error { + d.DataType = di.dt + d.GroupID = di.opts.groupID | descrGroupMask + d.LinkedID = di.opts.linkID + + if !di.opts.t.IsZero() { + t = di.opts.t + } + d.CreatedAt = t.Unix() + d.ModifiedAt = t.Unix() + + d.UID = 0 + d.GID = 0 + + if err := d.setName(di.opts.name); err != nil { + return err + } + + return d.setExtra(di.opts.extra) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go new file mode 100644 index 00000000000..75266e1945c --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go @@ -0,0 +1,174 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" +) + +var ( + errInvalidMagic = errors.New("invalid SIF magic") + errIncompatibleVersion = errors.New("incompatible SIF version") +) + +// isValidSif looks at key fields from the global header to assess SIF validity. +func isValidSif(f *FileImage) error { + if f.h.Magic != hdrMagic { + return errInvalidMagic + } + + if f.h.Version != CurrentVersion.bytes() { + return errIncompatibleVersion + } + + return nil +} + +// populateMinIDs populates the minIDs field of f. +func (f *FileImage) populateMinIDs() { + f.minIDs = make(map[uint32]uint32) + f.WithDescriptors(func(d Descriptor) bool { + if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID { + f.minIDs[d.raw.GroupID] = d.ID() + } + return false + }) +} + +// loadContainer loads a SIF image from rw. +func loadContainer(rw ReadWriter) (*FileImage, error) { + f := FileImage{rw: rw} + + // Read global header. 
+ err := binary.Read( + io.NewSectionReader(rw, 0, int64(binary.Size(f.h))), + binary.LittleEndian, + &f.h, + ) + if err != nil { + return nil, fmt.Errorf("reading global header: %w", err) + } + + if err := isValidSif(&f); err != nil { + return nil, err + } + + // Read descriptors. + f.rds = make([]rawDescriptor, f.h.DescriptorsTotal) + err = binary.Read( + io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize), + binary.LittleEndian, + &f.rds, + ) + if err != nil { + return nil, fmt.Errorf("reading descriptors: %w", err) + } + + f.populateMinIDs() + + return &f, nil +} + +// loadOpts accumulates container loading options. +type loadOpts struct { + flag int + closeOnUnload bool +} + +// LoadOpt are used to specify container loading options. +type LoadOpt func(*loadOpts) error + +// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file. +func OptLoadWithFlag(flag int) LoadOpt { + return func(lo *loadOpts) error { + lo.flag = flag + return nil + } +} + +// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer. +// By default, the ReadWriter will be closed if it implements the io.Closer interface. +func OptLoadWithCloseOnUnload(b bool) LoadOpt { + return func(lo *loadOpts) error { + lo.closeOnUnload = b + return nil + } +} + +// LoadContainerFromPath loads a new SIF container from path, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. +// +// By default, the file is opened for read and write access. To change this behavior, consider +// using OptLoadWithFlag. +func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + flag: os.O_RDWR, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + fp, err := os.OpenFile(path, lo.flag, 0) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f, err := loadContainer(fp) + if err != nil { + fp.Close() + + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = true + return f, nil +} + +// LoadContainer loads a new SIF container from rw, according to opts. +// +// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources +// are released. By default, UnloadContainer will close rw if it implements the io.Closer +// interface. To change this behavior, consider using OptLoadWithCloseOnUnload. +func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) { + lo := loadOpts{ + closeOnUnload: true, + } + + for _, opt := range opts { + if err := opt(&lo); err != nil { + return nil, fmt.Errorf("%w", err) + } + } + + f, err := loadContainer(rw) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + f.closeOnUnload = lo.closeOnUnload + return f, nil +} + +// UnloadContainer unloads f, releasing associated resources. +func (f *FileImage) UnloadContainer() error { + if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload { + if err := c.Close(); err != nil { + return fmt.Errorf("%w", err) + } + } + return nil +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go new file mode 100644 index 00000000000..635d6e89cfb --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -0,0 +1,210 @@ +// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// This software is licensed under a 3-clause BSD license. 
Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +package sif + +import ( + "errors" + "fmt" +) + +// ErrNoObjects is the error returned when an image contains no data objects. +var ErrNoObjects = errors.New("no objects in image") + +// ErrObjectNotFound is the error returned when a data object is not found. +var ErrObjectNotFound = errors.New("object not found") + +// ErrMultipleObjectsFound is the error returned when multiple data objects are found. +var ErrMultipleObjectsFound = errors.New("multiple objects found") + +// ErrInvalidObjectID is the error returned when an invalid object ID is supplied. +var ErrInvalidObjectID = errors.New("invalid object ID") + +// ErrInvalidGroupID is the error returned when an invalid group ID is supplied. +var ErrInvalidGroupID = errors.New("invalid group ID") + +// DescriptorSelectorFunc returns true if d matches, and false otherwise. +type DescriptorSelectorFunc func(d Descriptor) (bool, error) + +// WithDataType selects descriptors that have data type dt. +func WithDataType(dt DataType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.DataType() == dt, nil + } +} + +// WithID selects descriptors with a matching ID. +func WithID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + return d.ID() == id, nil + } +} + +// WithNoGroup selects descriptors that are not contained within an object group. +func WithNoGroup() DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.GroupID() == 0, nil + } +} + +// WithGroupID returns a selector func that selects descriptors with a matching groupID. +func WithGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + return d.GroupID() == groupID, nil + } +} + +// WithLinkedID selects descriptors that are linked to the data object with specified ID. +func WithLinkedID(id uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if id == 0 { + return false, ErrInvalidObjectID + } + linkedID, isGroup := d.LinkedID() + return !isGroup && linkedID == id, nil + } +} + +// WithLinkedGroupID selects descriptors that are linked to the data object group with specified +// ID. +func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if groupID == 0 { + return false, ErrInvalidGroupID + } + linkedID, isGroup := d.LinkedID() + return isGroup && linkedID == groupID, nil + } +} + +// WithPartitionType selects descriptors containing a partition of type pt. +func WithPartitionType(pt PartType) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + return d.raw.isPartitionOfType(pt), nil + } +} + +// descriptorFromRaw populates a Descriptor from rd. +func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor { + return Descriptor{ + raw: *rd, + r: f.rw, + relativeID: rd.ID - f.minIDs[rd.GroupID], + } +} + +// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true. +// If the image contains no data objects, an error wrapping ErrNoObjects is returned. 
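A short sketch connecting the loading API above with these selectors: open an image read-only, list its in-use objects, and look up the primary system partition if one exists (the path is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	f, err := sif.LoadContainerFromPath("example.sif", sif.OptLoadWithFlag(os.O_RDONLY))
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	// No selectors: all in-use descriptors are returned.
	ds, err := f.GetDescriptors()
	if err != nil {
		log.Fatal(err)
	}

	for _, d := range ds {
		fmt.Printf("id=%d type=%v name=%q size=%d\n", d.ID(), d.DataType(), d.Name(), d.Size())
	}

	// Selectors compose: e.g. the primary system partition, if present.
	if d, err := f.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys)); err == nil {
		if fs, pt, arch, err := d.PartitionMetadata(); err == nil {
			fmt.Printf("primary partition: fs=%v type=%v arch=%s\n", fs, pt, arch)
		}
	}
}
```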
+func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return nil, fmt.Errorf("%w", ErrNoObjects) + } + + var ds []Descriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error { + ds = append(ds, f.descriptorFromRaw(d)) + return nil + }) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + return ds, nil +} + +// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is +// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns, +// ErrMultipleObjectsFound is returned. +func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) { + var d *rawDescriptor + + err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error { + if d != nil { + return ErrMultipleObjectsFound + } + d = found + return nil + }) + + if err == nil && d == nil { + err = ErrObjectNotFound + } + + return d, err +} + +// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data +// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an +// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an +// error wrapping ErrMultipleObjectsFound is returned. +func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) { + if f.DescriptorsFree() == f.DescriptorsTotal() { + return Descriptor{}, fmt.Errorf("%w", ErrNoObjects) + } + + d, err := f.getDescriptor(fns...) + if err != nil { + return Descriptor{}, fmt.Errorf("%w", err) + } + + return f.descriptorFromRaw(d), nil +} + +// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns +// select the descriptor. +func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + for _, fn := range fns { + if ok, err := fn(d); !ok || err != nil { + return ok, err + } + } + return true, nil + } +} + +// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns +// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is +// returned to the caller. +func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error { + for i, d := range f.rds { + if !d.Used { + continue + } + + if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil { + return err + } else if !ok { + continue + } + + if err := onMatchFn(&f.rds[i]); err != nil { + return err + } + } + + return nil +} + +var errAbort = errors.New("abort") + +// abortOnMatch is a semantic convenience function that always returns a non-nil error, which can +// be used as a no-op matchFn. +func abortOnMatch(*rawDescriptor) error { return errAbort } + +// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true. +func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) { + selectFn := func(d Descriptor) (bool, error) { + return fn(d), nil + } + _ = f.withDescriptors(selectFn, abortOnMatch) +} diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go new file mode 100644 index 00000000000..704acee4a0e --- /dev/null +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -0,0 +1,364 @@ +// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved. 
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved. +// Copyright (c) 2017, Yannick Cote All rights reserved. +// This software is licensed under a 3-clause BSD license. Please consult the +// LICENSE file distributed with the sources of this project regarding your +// rights to use or distribute this software. + +// Package sif implements data structures and routines to create +// and access SIF files. +// +// Layout of a SIF file (example): +// +// .================================================. +// | GLOBAL HEADER: Sifheader | +// | - launch: "#!/usr/bin/env..." | +// | - magic: "SIF_MAGIC" | +// | - version: "1" | +// | - arch: "4" | +// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e | +// | - ctime: 1504657553 | +// | - mtime: 1504657653 | +// | - ndescr: 3 | +// | - descroff: 120 | --. +// | - descrlen: 432 | | +// | - dataoff: 4096 | | +// | - datalen: 619362 | | +// |------------------------------------------------| <-' +// | DESCR[0]: Sifdeffile | +// | - Sifcommon | +// | - datatype: DATA_DEFFILE | +// | - id: 1 | +// | - groupid: 1 | +// | - link: NONE | +// | - fileoff: 4096 | --. +// | - filelen: 222 | | +// |------------------------------------------------| <-----. +// | DESCR[1]: Sifpartition | | | +// | - Sifcommon | | | +// | - datatype: DATA_PARTITION | | | +// | - id: 2 | | | +// | - groupid: 1 | | | +// | - link: NONE | | | +// | - fileoff: 4318 | ----. | +// | - filelen: 618496 | | | | +// | - fstype: Squashfs | | | | +// | - parttype: System | | | | +// | - content: Linux | | | | +// |------------------------------------------------| | | | +// | DESCR[2]: Sifsignature | | | | +// | - Sifcommon | | | | +// | - datatype: DATA_SIGNATURE | | | | +// | - id: 3 | | | | +// | - groupid: NONE | | | | +// | - link: 2 | ------' +// | - fileoff: 622814 | ------. +// | - filelen: 644 | | | | +// | - hashtype: SHA384 | | | | +// | - entity: @ | | | | +// |------------------------------------------------| <-' | | +// | Definition file data | | | +// | . | | | +// | . | | | +// | . | | | +// |------------------------------------------------| <---' | +// | File system partition image | | +// | . | | +// | . | | +// | . | | +// |------------------------------------------------| <-----' +// | Signed verification data | +// | . | +// | . | +// | . | +// `================================================' +// +package sif + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/google/uuid" +) + +// SIF header constants and quantities. +const ( + hdrLaunchLen = 32 // len("#!/usr/bin/env... ") + hdrMagicLen = 10 // len("SIF_MAGIC") + hdrVersionLen = 3 // len("99") +) + +var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'} + +// SpecVersion specifies a SIF specification version. +type SpecVersion uint8 + +func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) } + +// bytes returns the value of b, formatted for direct inclusion in a SIF header. +func (v SpecVersion) bytes() [hdrVersionLen]byte { + var b [3]byte + copy(b[:], fmt.Sprintf("%02d", v)) + return b +} + +// SIF specification versions. +const ( + version01 SpecVersion = iota + 1 +) + +// CurrentVersion specifies the current SIF specification version. 
+const CurrentVersion = version01 + +const ( + descrGroupMask = 0xf0000000 // groups start at that offset + descrEntityLen = 256 // len("Joe Bloe ...") + descrNameLen = 128 // descriptor name (string identifier) + descrMaxPrivLen = 384 // size reserved for descriptor specific data +) + +// DataType represents the different SIF data object types stored in the image. +type DataType int32 + +// List of supported SIF data types. +const ( + DataDeffile DataType = iota + 0x4001 // definition file data object + DataEnvVar // environment variables data object + DataLabels // JSON labels data object + DataPartition // file system data object + DataSignature // signing/verification data object + DataGenericJSON // generic JSON meta-data + DataGeneric // generic / raw data + DataCryptoMessage // cryptographic message data object +) + +// String returns a human-readable representation of t. +func (t DataType) String() string { + switch t { + case DataDeffile: + return "Def.FILE" + case DataEnvVar: + return "Env.Vars" + case DataLabels: + return "JSON.Labels" + case DataPartition: + return "FS" + case DataSignature: + return "Signature" + case DataGenericJSON: + return "JSON.Generic" + case DataGeneric: + return "Generic/Raw" + case DataCryptoMessage: + return "Cryptographic Message" + } + return "Unknown" +} + +// FSType represents the different SIF file system types found in partition data objects. +type FSType int32 + +// List of supported file systems. +const ( + FsSquash FSType = iota + 1 // Squashfs file system, RDONLY + FsExt3 // EXT3 file system, RDWR (deprecated) + FsImmuObj // immutable data object archive + FsRaw // raw data + FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY +) + +// String returns a human-readable representation of t. +func (t FSType) String() string { + switch t { + case FsSquash: + return "Squashfs" + case FsExt3: + return "Ext3" + case FsImmuObj: + return "Archive" + case FsRaw: + return "Raw" + case FsEncryptedSquashfs: + return "Encrypted squashfs" + } + return "Unknown" +} + +// PartType represents the different SIF container partition types (system and data). +type PartType int32 + +// List of supported partition types. +const ( + PartSystem PartType = iota + 1 // partition hosts an operating system + PartPrimSys // partition hosts the primary operating system + PartData // partition hosts data only + PartOverlay // partition hosts an overlay +) + +// String returns a human-readable representation of t. +func (t PartType) String() string { + switch t { + case PartSystem: + return "System" + case PartPrimSys: + return "*System" + case PartData: + return "Data" + case PartOverlay: + return "Overlay" + } + return "Unknown" +} + +// hashType represents the different SIF hashing function types used to fingerprint data objects. +type hashType int32 + +// List of supported hash functions. +const ( + hashSHA256 hashType = iota + 1 + hashSHA384 + hashSHA512 + hashBLAKE2S + hashBLAKE2B +) + +// FormatType represents the different formats used to store cryptographic message objects. +type FormatType int32 + +// List of supported cryptographic message formats. +const ( + FormatOpenPGP FormatType = iota + 1 + FormatPEM +) + +// String returns a human-readable representation of t. +func (t FormatType) String() string { + switch t { + case FormatOpenPGP: + return "OpenPGP" + case FormatPEM: + return "PEM" + } + return "Unknown" +} + +// MessageType represents the different messages stored within cryptographic message objects. 
+type MessageType int32 + +// List of supported cryptographic message formats. +const ( + // openPGP formatted messages. + MessageClearSignature MessageType = 0x100 + + // PEM formatted messages. + MessageRSAOAEP MessageType = 0x200 +) + +// String returns a human-readable representation of t. +func (t MessageType) String() string { + switch t { + case MessageClearSignature: + return "Clear Signature" + case MessageRSAOAEP: + return "RSA-OAEP" + } + return "Unknown" +} + +// header describes a loaded SIF file. +type header struct { + LaunchScript [hdrLaunchLen]byte + + Magic [hdrMagicLen]byte + Version [hdrVersionLen]byte + Arch archType + ID uuid.UUID + + CreatedAt int64 + ModifiedAt int64 + + DescriptorsFree int64 + DescriptorsTotal int64 + DescriptorsOffset int64 + DescriptorsSize int64 + DataOffset int64 + DataSize int64 +} + +// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h. +func (h header) GetIntegrityReader() io.Reader { + return io.MultiReader( + bytes.NewReader(h.LaunchScript[:]), + bytes.NewReader(h.Magic[:]), + bytes.NewReader(h.Version[:]), + bytes.NewReader(h.ID[:]), + ) +} + +// ReadWriter describes the interface required to read and write SIF images. +type ReadWriter interface { + io.ReaderAt + io.WriteSeeker + Truncate(int64) error +} + +// FileImage describes the representation of a SIF file in memory. +type FileImage struct { + rw ReadWriter // Backing storage for image. + + h header // Raw global header from image. + rds []rawDescriptor // Raw descriptors from image. + + closeOnUnload bool // Close rw on Unload. + minIDs map[uint32]uint32 // Minimum object IDs for each group ID. +} + +// LaunchScript returns the image launch script. +func (f *FileImage) LaunchScript() string { + return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00")) +} + +// Version returns the SIF specification version of the image. +func (f *FileImage) Version() string { + return string(bytes.TrimRight(f.h.Version[:], "\x00")) +} + +// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU +// architecture cannot be determined. +func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() } + +// ID returns the ID of the image. +func (f *FileImage) ID() string { return f.h.ID.String() } + +// CreatedAt returns the creation time of the image. +func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) } + +// ModifiedAt returns the last modification time of the image. +func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) } + +// DescriptorsFree returns the number of free descriptors in the image. +func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree } + +// DescriptorsTotal returns the total number of descriptors in the image. +func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal } + +// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset } + +// DescriptorsSize returns the size (in bytes) of the descriptors section in the image. +func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize } + +// DataOffset returns the offset (in bytes) of the data section in the image. +func (f *FileImage) DataOffset() int64 { return f.h.DataOffset } + +// DataSize returns the size (in bytes) of the data section in the image. 
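The FileImage accessors above map one-to-one onto global header fields. A small sketch printing image-level metadata for an existing file (path illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	f, err := sif.LoadContainerFromPath("example.sif", sif.OptLoadWithFlag(os.O_RDONLY))
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	fmt.Println("version:    ", f.Version())
	fmt.Println("arch:       ", f.PrimaryArch())
	fmt.Println("id:         ", f.ID())
	fmt.Println("created:    ", f.CreatedAt())
	fmt.Println("descriptors:", f.DescriptorsFree(), "free of", f.DescriptorsTotal())
	fmt.Println("data size:  ", f.DataSize(), "bytes")
}
```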
+func (f *FileImage) DataSize() int64 { return f.h.DataSize } + +// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the +// header of the image. +func (f *FileImage) GetHeaderIntegrityReader() io.Reader { + return f.h.GetIntegrityReader() +} diff --git a/vendor/github.com/vbauerster/mpb/v7/.travis.yml b/vendor/github.com/vbauerster/mpb/v7/.travis.yml deleted file mode 100644 index 9a203a67d2a..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le - -go: - - 1.14.x - -script: - - go test -race ./... - - for i in _examples/*/; do go build $i/*.go || exit 1; done diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md index 90d4fe639ca..413f9e1dbf0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/README.md +++ b/vendor/github.com/vbauerster/mpb/v7/README.md @@ -1,8 +1,7 @@ # Multi Progress Bar [![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v7) -[![Build Status](https://travis-ci.org/vbauerster/mpb.svg?branch=master)](https://travis-ci.org/vbauerster/mpb) -[![Go Report Card](https://goreportcard.com/badge/github.com/vbauerster/mpb)](https://goreportcard.com/report/github.com/vbauerster/mpb) +[![Test status](https://github.com/vbauerster/mpb/actions/workflows/test.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/test.yml) [![Donate with PayPal](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/vbauerster) **mpb** is a Go lib for rendering progress bars in terminal applications. @@ -37,10 +36,10 @@ func main() { total := 100 name := "Single Bar:" - // adding a single bar, which will inherit container's width - bar := p.Add(int64(total), - // progress bar filler with customized style - mpb.NewBarFiller(mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟")), + // create a single bar, which will inherit container's width + bar := p.New(int64(total), + // BarFillerBuilder with custom style + mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), @@ -66,7 +65,7 @@ func main() { ```go var wg sync.WaitGroup - // passed &wg will be accounted at p.Wait() call + // passed wg will be accounted at p.Wait() call p := mpb.New(mpb.WithWaitGroup(&wg)) total, numBars := 100, 3 wg.Add(numBars) @@ -104,7 +103,7 @@ func main() { } }() } - // Waiting for passed &wg and for all bars to complete and flush + // wait for passed wg and for all bars to complete and flush p.Wait() ``` diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go index dabe1a47546..646cb471a61 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar.go @@ -5,9 +5,9 @@ import ( "context" "fmt" "io" - "log" "runtime/debug" "strings" + "sync" "time" "github.com/acarl005/stripansi" @@ -35,7 +35,6 @@ type Bar struct { cacheState *bState container *Progress - dlogger *log.Logger recoveredPanic interface{} } @@ -50,11 +49,11 @@ type bState struct { total int64 current int64 refill int64 - lastN int64 - iterated bool + lastIncrement int64 trimSpace bool completed bool completeFlushed bool + aborted bool triggerComplete bool dropOnComplete bool noPop bool @@ -63,7 +62,7 @@ type bState struct { 
averageDecorators []decor.AverageDecorator ewmaDecorators []decor.EwmaDecorator shutdownListeners []decor.ShutdownListener - bufP, bufB, bufA *bytes.Buffer + buffers [3]*bytes.Buffer filler BarFiller middleware func(BarFiller) BarFiller extender extenderFunc @@ -80,7 +79,6 @@ type frame struct { } func newBar(container *Progress, bs *bState) *Bar { - logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id) ctx, cancel := context.WithCancel(container.ctx) bar := &Bar{ @@ -92,7 +90,6 @@ func newBar(container *Progress, bs *bState) *Bar { frameCh: make(chan *frame, 1), done: make(chan struct{}), cancel: cancel, - dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile), } go bar.serve(ctx, bs) @@ -105,7 +102,7 @@ func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } - return newProxyReader(r, b) + return b.newProxyReader(r) } // ID returs id of the bar. @@ -164,12 +161,12 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { } // SetTotal sets total dynamically. -// If total is less than or equal to zero it takes progress' current value. +// If total is negative it takes progress' current value. func (b *Bar) SetTotal(total int64, triggerComplete bool) { select { case b.operateState <- func(s *bState) { s.triggerComplete = triggerComplete - if total <= 0 { + if total < 0 { s.total = s.current } else { s.total = total @@ -189,8 +186,7 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) { func (b *Bar) SetCurrent(current int64) { select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = current - s.current + s.lastIncrement = current - s.current s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total @@ -214,10 +210,12 @@ func (b *Bar) IncrBy(n int) { // IncrInt64 increments progress by amount of n. 
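These changes tighten the increment/EWMA contract: an increment must precede each DecoratorEwmaUpdate call, and non-positive increments are now ignored. A minimal sketch of the intended call pattern; the decorator choices are illustrative:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	p := mpb.New()
	total := 100

	bar := p.New(int64(total),
		mpb.BarStyle(),
		mpb.AppendDecorators(
			decor.Percentage(),
			decor.EwmaETA(decor.ET_STYLE_GO, 30),
		),
	)

	for i := 0; i < total; i++ {
		start := time.Now()
		time.Sleep(20 * time.Millisecond) // simulated work

		// Increment first, then feed the iteration duration to EWMA decorators.
		bar.IncrBy(1)
		bar.DecoratorEwmaUpdate(time.Since(start))
	}

	p.Wait()
}
```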
func (b *Bar) IncrInt64(n int64) { + if n <= 0 { + return + } select { case b.operateState <- func(s *bState) { - s.iterated = true - s.lastN = n + s.lastIncrement = n s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total @@ -236,10 +234,18 @@ func (b *Bar) IncrInt64(n int64) { func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { select { case b.operateState <- func(s *bState) { - ewmaIterationUpdate(false, s, dur) + if s.lastIncrement > 0 { + s.decoratorEwmaUpdate(dur) + s.lastIncrement = 0 + } else { + panic("increment required before ewma iteration update") + } }: case <-b.done: - ewmaIterationUpdate(true, b.cacheState, dur) + if b.cacheState.lastIncrement > 0 { + b.cacheState.decoratorEwmaUpdate(dur) + b.cacheState.lastIncrement = 0 + } } } @@ -249,9 +255,7 @@ func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { func (b *Bar) DecoratorAverageAdjust(start time.Time) { select { case b.operateState <- func(s *bState) { - for _, d := range s.averageDecorators { - d.AverageAdjust(start) - } + s.decoratorAverageAdjust(start) }: case <-b.done: } @@ -271,10 +275,12 @@ func (b *Bar) Abort(drop bool) { done := make(chan struct{}) select { case b.operateState <- func(s *bState) { - if s.completed == true { + if s.completed { close(done) return } + s.aborted = true + b.cancel() // container must be run during lifetime of this inner goroutine // we control this by done channel declared above go func() { @@ -295,7 +301,6 @@ func (b *Bar) Abort(drop bool) { } close(done) // release hold of Abort }() - b.cancel() }: // guarantee: container is alive during lifetime of this hold <-done @@ -321,10 +326,7 @@ func (b *Bar) serve(ctx context.Context, s *bState) { case op := <-b.operateState: op(s) case <-ctx.Done(): - // Notifying decorators about shutdown event - for _, sl := range s.shutdownListeners { - sl.Shutdown() - } + s.decoratorShutdownNotify() b.cacheState = s close(b.done) return @@ -340,13 +342,16 @@ func (b *Bar) render(tw int) { // recovering if user defined decorator panics for example if p := recover(); p != nil { if b.recoveredPanic == nil { + if s.debugOut != nil { + fmt.Fprintln(s.debugOut, p) + _, _ = s.debugOut.Write(debug.Stack()) + } s.extender = makePanicExtender(p) b.toShutdown = !b.toShutdown b.recoveredPanic = p } reader, lines := s.extender(nil, s.reqWidth, stat) b.frameCh <- &frame{reader, lines + 1} - b.dlogger.Println(p) } s.completeFlushed = s.completed }() @@ -423,40 +428,41 @@ func (b *Bar) wSyncTable() [][]chan int { } func (s *bState) draw(stat decor.Statistics) io.Reader { + bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] nlr := strings.NewReader("\n") tw := stat.AvailableWidth for _, d := range s.pDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - s.bufP.WriteString(str) + bufP.WriteString(str) } if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…")) - s.bufP.Reset() + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…")) + bufP.Reset() return io.MultiReader(trunc, nlr) } if !s.trimSpace && stat.AvailableWidth > 1 { stat.AvailableWidth -= 2 - s.bufB.WriteByte(' ') - defer s.bufB.WriteByte(' ') + bufB.WriteByte(' ') + defer bufB.WriteByte(' ') } tw = stat.AvailableWidth for _, d := range s.aDecorators { str := d.Decor(stat) stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - s.bufA.WriteString(str) + bufA.WriteString(str) } if stat.AvailableWidth < 1 { - trunc 
:= strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…")) - s.bufA.Reset() - return io.MultiReader(s.bufP, s.bufB, trunc, nlr) + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…")) + bufA.Reset() + return io.MultiReader(bufP, bufB, trunc, nlr) } - s.filler.Fill(s.bufB, s.reqWidth, stat) + s.filler.Fill(bufB, s.reqWidth, stat) - return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr) + return io.MultiReader(bufP, bufB, bufA, nlr) } func (s *bState) wSyncTable() [][]chan int { @@ -481,6 +487,57 @@ func (s *bState) wSyncTable() [][]chan int { return table } +func (s bState) decoratorEwmaUpdate(dur time.Duration) { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.ewmaDecorators); i++ { + switch d := s.ewmaDecorators[i]; i { + case len(s.ewmaDecorators) - 1: + d.EwmaUpdate(s.lastIncrement, dur) + default: + wg.Add(1) + go func() { + d.EwmaUpdate(s.lastIncrement, dur) + wg.Done() + }() + } + } + wg.Wait() +} + +func (s bState) decoratorAverageAdjust(start time.Time) { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.averageDecorators); i++ { + switch d := s.averageDecorators[i]; i { + case len(s.averageDecorators) - 1: + d.AverageAdjust(start) + default: + wg.Add(1) + go func() { + d.AverageAdjust(start) + wg.Done() + }() + } + } + wg.Wait() +} + +func (s bState) decoratorShutdownNotify() { + wg := new(sync.WaitGroup) + for i := 0; i < len(s.shutdownListeners); i++ { + switch d := s.shutdownListeners[i]; i { + case len(s.shutdownListeners) - 1: + d.Shutdown() + default: + wg.Add(1) + go func() { + d.Shutdown() + wg.Done() + }() + } + } + wg.Wait() +} + func newStatistics(tw int, s *bState) decor.Statistics { return decor.Statistics{ ID: s.id, @@ -489,6 +546,7 @@ func newStatistics(tw int, s *bState) decor.Statistics { Current: s.current, Refill: s.refill, Completed: s.completeFlushed, + Aborted: s.aborted, } } @@ -499,27 +557,13 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator { return d } -func ewmaIterationUpdate(done bool, s *bState, dur time.Duration) { - if !done && !s.iterated { - panic("increment required before ewma iteration update") - } else { - s.iterated = false - } - for _, d := range s.ewmaDecorators { - d.EwmaUpdate(s.lastN, dur) - } -} - func makePanicExtender(p interface{}) extenderFunc { pstr := fmt.Sprint(p) - stack := debug.Stack() - stackLines := bytes.Count(stack, []byte("\n")) return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) { mr := io.MultiReader( strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")), - strings.NewReader(fmt.Sprintf("\n%#v\n", st)), - bytes.NewReader(stack), + strings.NewReader("\n"), ) - return mr, stackLines + 1 + return mr, 0 } } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go index a69087c474e..81177fc2eea 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler.go @@ -9,31 +9,42 @@ import ( // BarFiller interface. // Bar (without decorators) renders itself by calling BarFiller's Fill method. // -// reqWidth is requested width, set by `func WithWidth(int) ContainerOption`. +// reqWidth is requested width set by `func WithWidth(int) ContainerOption`. // If not set, it defaults to terminal width. 
// -// Default implementations can be obtained via: -// -// func NewBarFiller(BarStyle()) BarFiller -// func NewBarFiller(SpinnerStyle()) BarFiller -// type BarFiller interface { Fill(w io.Writer, reqWidth int, stat decor.Statistics) } // BarFillerBuilder interface. +// Default implementations are: +// +// BarStyle() +// SpinnerStyle() +// NopStyle() +// type BarFillerBuilder interface { Build() BarFiller } -// BarFillerFunc is function type adapter to convert function into BarFiller. +// BarFillerFunc is function type adapter to convert compatible function +// into BarFiller interface. type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics) func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { f(w, reqWidth, stat) } +// BarFillerBuilderFunc is function type adapter to convert compatible +// function into BarFillerBuilder interface. +type BarFillerBuilderFunc func() BarFiller + +func (f BarFillerBuilderFunc) Build() BarFiller { + return f() +} + // NewBarFiller constructs a BarFiller from provided BarFillerBuilder. +// Deprecated. Prefer using `*Progress.New(...)` directly. func NewBarFiller(b BarFillerBuilder) BarFiller { return b.Build() } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go index 80b2104555d..54b7bfd6f04 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go @@ -32,13 +32,13 @@ type BarStyleComposer interface { } type bFiller struct { + rev bool components [components]*component tip struct { count uint onComplete *component frames []*component } - flush func(dst io.Writer, filling, padding [][]byte) } type component struct { @@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer { } func (s *barStyle) Build() BarFiller { - bf := new(bFiller) - if s.rev { - bf.flush = func(dst io.Writer, filling, padding [][]byte) { - flush(dst, padding, filling) - } - } else { - bf.flush = flush - } + bf := &bFiller{rev: s.rev} bf.components[iLbound] = &component{ width: runewidth.StringWidth(stripansi.Strip(s.lbound)), bytes: []byte(s.lbound), @@ -164,8 +157,9 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { return } - w.Write(s.components[iLbound].bytes) - defer w.Write(s.components[iRbound].bytes) + ow := optimisticWriter(w) + ow(s.components[iLbound].bytes) + defer ow(s.components[iRbound].bytes) if width == 0 { return @@ -236,14 +230,27 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { } } - s.flush(w, filling, padding) + if s.rev { + flush(ow, padding, filling) + } else { + flush(ow, filling, padding) + } } -func flush(dst io.Writer, filling, padding [][]byte) { +func flush(ow func([]byte), filling, padding [][]byte) { for i := len(filling) - 1; i >= 0; i-- { - dst.Write(filling[i]) + ow(filling[i]) } for i := 0; i < len(padding); i++ { - dst.Write(padding[i]) + ow(padding[i]) + } +} + +func optimisticWriter(w io.Writer) func([]byte) { + return func(p []byte) { + _, err := w.Write(p) + if err != nil { + panic(err) + } } } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go new file mode 100644 index 00000000000..1a7086fecb6 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go @@ -0,0 +1,14 @@ +package mpb + +import ( + "io" + + "github.com/vbauerster/mpb/v7/decor" +) + +// NopStyle provides BarFillerBuilder which builds NOP BarFiller. 
+func NopStyle() BarFillerBuilder { + return BarFillerBuilderFunc(func() BarFiller { + return BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + }) +} diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go index 58ae1c53283..d38525efc94 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go @@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) { return } + var err error rest := width - frameWidth switch s.position { case positionLeft: - io.WriteString(w, frame+strings.Repeat(" ", rest)) + _, err = io.WriteString(w, frame+strings.Repeat(" ", rest)) case positionRight: - io.WriteString(w, strings.Repeat(" ", rest)+frame) + _, err = io.WriteString(w, strings.Repeat(" ", rest)+frame) default: str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2) - io.WriteString(w, str) + _, err = io.WriteString(w, str) + } + if err != nil { + panic(err) } s.count++ } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v7/bar_option.go index 46b7de0bf56..4ba49050535 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_option.go @@ -5,12 +5,20 @@ import ( "io" "github.com/vbauerster/mpb/v7/decor" - "github.com/vbauerster/mpb/v7/internal" ) // BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) +func skipNil(decorators []decor.Decorator) (filtered []decor.Decorator) { + for _, d := range decorators { + if d != nil { + filtered = append(filtered, d) + } + } + return +} + func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) { type mergeWrapper interface { MergeUnwrap() []decor.Decorator @@ -26,14 +34,14 @@ func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Deco // AppendDecorators let you inject decorators to the bar's right side. func AppendDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { - s.addDecorators(&s.aDecorators, decorators...) + s.addDecorators(&s.aDecorators, skipNil(decorators)...) } } // PrependDecorators let you inject decorators to the bar's left side. func PrependDecorators(decorators ...decor.Decorator) BarOption { return func(s *bState) { - s.addDecorators(&s.pDecorators, decorators...) + s.addDecorators(&s.pDecorators, skipNil(decorators)...) } } @@ -81,7 +89,10 @@ func BarFillerOnComplete(message string) BarOption { return BarFillerMiddleware(func(base BarFiller) BarFiller { return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) { if st.Completed { - io.WriteString(w, message) + _, err := io.WriteString(w, message) + if err != nil { + panic(err) + } } else { base.Fill(w, reqWidth, st) } @@ -138,9 +149,12 @@ func BarNoPop() BarOption { } } -// BarOptional will invoke provided option only when pick is true. -func BarOptional(option BarOption, pick bool) BarOption { - return BarOptOn(option, internal.Predicate(pick)) +// BarOptional will invoke provided option only when cond is true. 
+func BarOptional(option BarOption, cond bool) BarOption { + if cond { + return option + } + return nil } // BarOptOn will invoke provided option only when higher order predicate diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go index a858c3c51dd..e523a175938 100644 --- a/vendor/github.com/vbauerster/mpb/v7/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go @@ -5,8 +5,6 @@ import ( "io/ioutil" "sync" "time" - - "github.com/vbauerster/mpb/v7/internal" ) // ContainerOption is a func option to alter default behavior of a bar @@ -101,9 +99,12 @@ func PopCompletedMode() ContainerOption { } } -// ContainerOptional will invoke provided option only when pick is true. -func ContainerOptional(option ContainerOption, pick bool) ContainerOption { - return ContainerOptOn(option, internal.Predicate(pick)) +// ContainerOptional will invoke provided option only when cond is true. +func ContainerOptional(option ContainerOption, cond bool) ContainerOption { + if cond { + return option + } + return nil } // ContainerOptOn will invoke provided option only when higher order diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go index 925c8b1dcf1..eaf541cb756 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go +++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go @@ -76,9 +76,9 @@ func (w *Writer) GetWidth() (int, error) { return tw, err } -func (w *Writer) ansiCuuAndEd() (err error) { +func (w *Writer) ansiCuuAndEd() error { buf := make([]byte, 8) buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10) - _, err = w.out.Write(append(buf, cuuAndEd...)) - return + _, err := w.out.Write(append(buf, cuuAndEd...)) + return err } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go index e81fae367ee..9fec57b159f 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go @@ -53,6 +53,7 @@ type Statistics struct { Current int64 Refill int64 Completed bool + Aborted bool } // Decorator interface. diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go index e41406a64da..84767115515 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/merge.go @@ -17,6 +17,9 @@ import ( // +----+--------+---------+--------+ // func Merge(decorator Decorator, placeholders ...WC) Decorator { + if decorator == nil { + return nil + } if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 { return decorator } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go new file mode 100644 index 00000000000..10ff67009cc --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go @@ -0,0 +1,41 @@ +package decor + +// OnAbort returns decorator, which wraps provided decorator with sole +// purpose to display provided message on abort event. It has no effect +// if bar.Abort(drop bool) is called with true argument. 
+// +// `decorator` Decorator to wrap +// +// `message` message to display on abort event +// +func OnAbort(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } + d := &onAbortWrapper{ + Decorator: decorator, + msg: message, + } + if md, ok := decorator.(*mergeDecorator); ok { + d.Decorator, md.Decorator = md.Decorator, d + return md + } + return d +} + +type onAbortWrapper struct { + Decorator + msg string +} + +func (d *onAbortWrapper) Decor(s Statistics) string { + if s.Aborted { + wc := d.GetConf() + return wc.FormatMsg(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d *onAbortWrapper) Base() Decorator { + return d.Decorator +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go index f46b19abaa9..2ada2b31b16 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go @@ -1,6 +1,6 @@ package decor -// OnComplete returns decorator, which wraps provided decorator, with +// OnComplete returns decorator, which wraps provided decorator with // sole purpose to display provided message on complete event. // // `decorator` Decorator to wrap @@ -8,6 +8,9 @@ package decor // `message` message to display on complete event // func OnComplete(decorator Decorator, message string) Decorator { + if decorator == nil { + return nil + } d := &onCompleteWrapper{ Decorator: decorator, msg: message, diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go new file mode 100644 index 00000000000..a9db0653aee --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go @@ -0,0 +1,27 @@ +package decor + +// OnPredicate returns decorator if predicate evaluates to true. +// +// `decorator` Decorator +// +// `predicate` func() bool +// +func OnPredicate(decorator Decorator, predicate func() bool) Decorator { + if predicate() { + return decorator + } + return nil +} + +// OnCondition returns decorator if condition is true. 
+// +// `decorator` Decorator +// +// `cond` bool +// +func OnCondition(decorator Decorator, cond bool) Decorator { + if cond { + return decorator + } + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go new file mode 100644 index 00000000000..ea9fda79da4 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go @@ -0,0 +1,12 @@ +package decor + +import "io" + +func optimisticStringWriter(w io.Writer) func(string) { + return func(s string) { + _, err := io.WriteString(w, s) + if err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go index 2b0a7a9562c..6e7f5c6ed68 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go @@ -2,7 +2,6 @@ package decor import ( "fmt" - "io" "strconv" "github.com/vbauerster/mpb/v7/internal" @@ -24,12 +23,12 @@ func (s percentageType) Format(st fmt.State, verb rune) { } } - io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) - + osw := optimisticStringWriter(st) + osw(strconv.FormatFloat(float64(s), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + osw(" ") } - io.WriteString(st, "%") + osw("%") } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go index e4b9740584b..12879b8f143 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go @@ -2,8 +2,6 @@ package decor import ( "fmt" - "io" - "math" "strconv" ) @@ -47,16 +45,16 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { unit = _iMiB case self < _iTiB: unit = _iGiB - case self <= math.MaxInt64: + default: unit = _iTiB } - io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) - + osw := optimisticStringWriter(st) + osw(strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + osw(" ") } - io.WriteString(st, unit.String()) + osw(unit.String()) } const ( @@ -96,14 +94,14 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { unit = _MB case self < _TB: unit = _GB - case self <= math.MaxInt64: + default: unit = _TB } - io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) - + osw := optimisticStringWriter(st) + osw(strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) if st.Flag(' ') { - io.WriteString(st, " ") + osw(" ") } - io.WriteString(st, unit.String()) + osw(unit.String()) } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go index 634edabfdc0..99cfde2bf71 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go @@ -2,7 +2,6 @@ package decor import ( "fmt" - "io" "math" "time" @@ -24,7 +23,7 @@ type speedFormatter struct { func (self *speedFormatter) Format(st fmt.State, verb rune) { self.Formatter.Format(st, verb) - io.WriteString(st, "/s") + optimisticStringWriter(st)("/s") } // EwmaSpeed exponential-weighted-moving-average based speed decorator. 
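The mpb v7 hunks above replace NewBarFiller-based construction with Progress.New plus a BarFillerBuilder, add an Aborted statistic with an abort-aware decorator (decor.OnAbort), and filter out nil decorators so conditional helpers such as decor.OnCondition can simply return nil. A minimal caller-side sketch of that surface, assuming only the vendored mpb/v7 and mpb/v7/decor packages shown in the hunks (the label, width, and loop are illustrative, not taken from cri-o):

    package main

    import (
        "time"

        "github.com/vbauerster/mpb/v7"
        "github.com/vbauerster/mpb/v7/decor"
    )

    func main() {
        p := mpb.New(mpb.WithWidth(64))
        total := int64(100)
        bar := p.New(total, mpb.BarStyle(), // replaces p.Add(total, mpb.NewBarFiller(mpb.BarStyle()), ...)
            mpb.PrependDecorators(
                // OnAbort renders "aborted" once Statistics.Aborted is set by Bar.Abort(false).
                decor.OnAbort(decor.Name("copying"), "aborted"),
            ),
            mpb.AppendDecorators(
                // OnCondition returns nil when the flag is false; nil decorators are now skipped.
                decor.OnCondition(decor.Percentage(), true),
            ),
        )
        for i := int64(0); i < total; i++ {
            bar.IncrBy(1)
            time.Sleep(5 * time.Millisecond)
        }
        p.Wait()
    }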
diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go b/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go deleted file mode 100644 index 1e4dd24d9da..00000000000 --- a/vendor/github.com/vbauerster/mpb/v7/internal/predicate.go +++ /dev/null @@ -1,6 +0,0 @@ -package internal - -// Predicate helper for internal use. -func Predicate(pick bool) func() bool { - return func() bool { return pick } -} diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go index c60c6569406..123af17cfe0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/progress.go +++ b/vendor/github.com/vbauerster/mpb/v7/progress.go @@ -6,8 +6,6 @@ import ( "context" "fmt" "io" - "io/ioutil" - "log" "math" "os" "sync" @@ -33,7 +31,6 @@ type Progress struct { done chan struct{} refreshCh chan time.Time once sync.Once - dlogger *log.Logger } // pState holds bars in its priorityQueue. It gets passed to @@ -75,7 +72,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { rr: prr, parkedBars: make(map[*Bar]*Bar), output: os.Stdout, - debugOut: ioutil.Discard, } for _, opt := range options { @@ -91,7 +87,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { bwg: new(sync.WaitGroup), operateState: make(chan func(*pState)), done: make(chan struct{}), - dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile), } p.cwg.Add(1) @@ -99,17 +94,19 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { return p } -// AddBar creates a bar with default bar filler. Different filler can -// be chosen and applied via `*Progress.Add(...) *Bar` method. +// AddBar creates a bar with default bar filler. func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(BarStyle()), options...) + return p.New(total, BarStyle(), options...) } -// AddSpinner creates a bar with default spinner filler. Different -// filler can be chosen and applied via `*Progress.Add(...) *Bar` -// method. +// AddSpinner creates a bar with default spinner filler. func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { - return p.Add(total, NewBarFiller(SpinnerStyle()), options...) + return p.New(total, SpinnerStyle(), options...) +} + +// New creates a bar with provided BarFillerBuilder. +func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { + return p.Add(total, builder.Build(), options...) } // Add creates a bar which renders itself by provided filler. @@ -117,7 +114,7 @@ func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { // Panics if *Progress instance is done, i.e. called after *Progress.Wait(). 
func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar { if filler == nil { - filler = BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + filler = NopStyle().Build() } p.bwg.Add(1) result := make(chan *Bar) @@ -232,12 +229,26 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { op(s) case <-p.refreshCh: if err := s.render(cw); err != nil { - p.dlogger.Println(err) + if s.debugOut != nil { + _, e := fmt.Fprintln(s.debugOut, err) + if e != nil { + panic(err) + } + } else { + panic(err) + } } case <-s.shutdownNotifier: for s.heapUpdated { if err := s.render(cw); err != nil { - p.dlogger.Println(err) + if s.debugOut != nil { + _, e := fmt.Fprintln(s.debugOut, err) + if e != nil { + panic(err) + } + } else { + panic(err) + } } } return @@ -309,7 +320,10 @@ func (s *pState) flush(cw *cwriter.Writer) error { for s.bHeap.Len() > 0 { b := heap.Pop(&s.bHeap).(*Bar) frame := <-b.frameCh - cw.ReadFrom(frame.reader) + _, err := cw.ReadFrom(frame.reader) + if err != nil { + return err + } if b.toShutdown { if b.recoveredPanic != nil { s.barShutdownQueue = append(s.barShutdownQueue, b) @@ -400,9 +414,9 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio bs.priority = -(math.MaxInt32 - s.idCount) } - bs.bufP = bytes.NewBuffer(make([]byte, 0, 128)) - bs.bufB = bytes.NewBuffer(make([]byte, 0, 256)) - bs.bufA = bytes.NewBuffer(make([]byte, 0, 128)) + for i := 0; i < len(bs.buffers); i++ { + bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) + } return bs } diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go index 316f438d76b..25f195bb82f 100644 --- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go +++ b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go @@ -11,73 +11,69 @@ type proxyReader struct { bar *Bar } -func (x *proxyReader) Read(p []byte) (int, error) { +func (x proxyReader) Read(p []byte) (int, error) { n, err := x.ReadCloser.Read(p) x.bar.IncrBy(n) if err == io.EOF { - go x.bar.SetTotal(0, true) + go x.bar.SetTotal(-1, true) } return n, err } type proxyWriterTo struct { - io.ReadCloser // *proxyReader - wt io.WriterTo - bar *Bar + proxyReader + wt io.WriterTo } -func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) { +func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) { n, err := x.wt.WriteTo(w) x.bar.IncrInt64(n) if err == io.EOF { - go x.bar.SetTotal(0, true) + go x.bar.SetTotal(-1, true) } return n, err } type ewmaProxyReader struct { - io.ReadCloser // *proxyReader - bar *Bar - iT time.Time + proxyReader } -func (x *ewmaProxyReader) Read(p []byte) (int, error) { - n, err := x.ReadCloser.Read(p) +func (x ewmaProxyReader) Read(p []byte) (int, error) { + start := time.Now() + n, err := x.proxyReader.Read(p) if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) - x.iT = time.Now() + x.bar.DecoratorEwmaUpdate(time.Since(start)) } return n, err } type ewmaProxyWriterTo struct { - io.ReadCloser // *ewmaProxyReader - wt io.WriterTo // *proxyWriterTo - bar *Bar - iT time.Time + ewmaProxyReader + wt proxyWriterTo } -func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { +func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { + start := time.Now() n, err := x.wt.WriteTo(w) if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(x.iT)) - x.iT = time.Now() + x.bar.DecoratorEwmaUpdate(time.Since(start)) } return n, err } -func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser { - rc := toReadCloser(r) - rc = &proxyReader{rc, bar} - 
- if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators { - now := time.Now() - rc = &ewmaProxyReader{rc, bar, now} - if isWriterTo { - rc = &ewmaProxyWriterTo{rc, wt, bar, now} +func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) { + pr := proxyReader{toReadCloser(r), b} + if wt, ok := r.(io.WriterTo); ok { + pw := proxyWriterTo{pr, wt} + if b.hasEwmaDecorators { + rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw} + } else { + rc = pw } - } else if isWriterTo { - rc = &proxyWriterTo{rc, wt, bar} + } else if b.hasEwmaDecorators { + rc = ewmaProxyReader{pr} + } else { + rc = pr } return rc } diff --git a/vendor/github.com/xanzy/ssh-agent/README.md b/vendor/github.com/xanzy/ssh-agent/README.md index d93af40a0c2..e2dfcedca9b 100644 --- a/vendor/github.com/xanzy/ssh-agent/README.md +++ b/vendor/github.com/xanzy/ssh-agent/README.md @@ -16,7 +16,7 @@ If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ ## Author -Sander van Harmelen () +Sander van Harmelen () ## License diff --git a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go index 62956079663..21d3cba2a5c 100644 --- a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go +++ b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go @@ -69,9 +69,6 @@ func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) } } -// Available returns true if Pageant is running -func Available() bool { return pageantWindow() != 0 } - // Query sends message msg to Pageant and returns response or error. // 'msg' is raw agent request with length prefix // Response is raw agent response with length prefix diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go index c46710e88e4..ca77e6a9658 100644 --- a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go +++ b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go @@ -27,17 +27,40 @@ import ( "net" "sync" + "github.com/Microsoft/go-winio" "golang.org/x/crypto/ssh/agent" ) +const ( + sshAgentPipe = `\\.\pipe\openssh-ssh-agent` +) + +// Available returns true if Pageant is running +func Available() bool { + if pageantWindow() != 0 { + return true + } + conn, err := winio.DialPipe(sshAgentPipe, nil) + if err != nil { + return false + } + conn.Close() + return true +} + // New returns a new agent.Agent and the (custom) connection it uses // to communicate with a running pagent.exe instance (see README.md) func New() (agent.Agent, net.Conn, error) { - if !Available() { - return nil, nil, errors.New("SSH agent requested but Pageant not running") + if pageantWindow() != 0 { + return agent.NewClient(&conn{}), nil, nil } - - return agent.NewClient(&conn{}), nil, nil + conn, err := winio.DialPipe(sshAgentPipe, nil) + if err != nil { + return nil, nil, errors.New( + "SSH agent requested, but could not detect Pageant or Windows native SSH agent", + ) + } + return agent.NewClient(conn), nil, nil } type conn struct { diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md index 00059242cab..a4f5f1458ff 100644 --- a/vendor/github.com/xeipuuv/gojsonpointer/README.md +++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md @@ -35,7 +35,7 @@ An implementation of JSON Pointer - Go language ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +https://tools.ietf.org/html/rfc6901 ### Note The 
4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go similarity index 84% rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry/retry.go rename to vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go index 37b9dabe5d8..3d43f7aea97 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go @@ -12,7 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry" +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry" import ( "context" @@ -54,8 +57,18 @@ type RequestFunc func(context.Context, func(context.Context) error) error // EvaluateFunc returns if an error is retry-able and if an explicit throttle // duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. type EvaluateFunc func(error) (bool, time.Duration) +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { if !c.Enabled { return func(ctx context.Context, fn func(context.Context) error) error { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md index 451ad6dc4df..8a40a86a246 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -33,8 +33,10 @@ The `otlptracehttp` package implements a client for the span exporter that sends ### Environment Variables -The following environment variables can be used -(instead of options objects) to override the default configuration. +The following environment variables can be used (instead of options objects) to +override the default configuration. 
For more information about how each of +these environment variables is interpreted, see [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). | Environment variable | Option | Default value | | ------------------------------------------------------------------------ |------------------------------ | ----------------------------------- | diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index d795db9127d..7e9bb6c47ae 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -20,7 +20,6 @@ import ( "sync" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" - tracesdk "go.opentelemetry.io/otel/sdk/trace" ) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection/connection.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection/connection.go deleted file mode 100644 index 6747803945f..00000000000 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection/connection.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package connection // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection" - -import ( - "context" - "math/rand" - "sync" - "sync/atomic" - "time" - "unsafe" - - "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "google.golang.org/grpc/encoding/gzip" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry" - - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -type Connection struct { - // Ensure pointer is 64-bit aligned for atomic operations on both 32 and 64 bit machines. 
- lastConnectErrPtr unsafe.Pointer - - // mu protects the Connection as it is accessed by the - // exporter goroutines and background Connection goroutine - mu sync.Mutex - cc *grpc.ClientConn - - // these fields are read-only after constructor is finished - cfg otlpconfig.Config - SCfg otlpconfig.SignalConfig - requestFunc retry.RequestFunc - metadata metadata.MD - newConnectionHandler func(cc *grpc.ClientConn) - - // these channels are created once - disconnectedCh chan bool - backgroundConnectionDoneCh chan struct{} - stopCh chan struct{} - - // this is for tests, so they can replace the closing - // routine without a worry of modifying some global variable - // or changing it back to original after the test is done - closeBackgroundConnectionDoneCh func(ch chan struct{}) -} - -func NewConnection(cfg otlpconfig.Config, sCfg otlpconfig.SignalConfig, handler func(cc *grpc.ClientConn)) *Connection { - c := new(Connection) - c.newConnectionHandler = handler - c.cfg = cfg - c.requestFunc = cfg.RetryConfig.RequestFunc(evaluate) - c.SCfg = sCfg - if len(c.SCfg.Headers) > 0 { - c.metadata = metadata.New(c.SCfg.Headers) - } - c.closeBackgroundConnectionDoneCh = func(ch chan struct{}) { - close(ch) - } - return c -} - -func (c *Connection) StartConnection(ctx context.Context) error { - c.stopCh = make(chan struct{}) - c.disconnectedCh = make(chan bool, 1) - c.backgroundConnectionDoneCh = make(chan struct{}) - - if err := c.connect(ctx); err == nil { - c.setStateConnected() - } else { - c.SetStateDisconnected(err) - } - go c.indefiniteBackgroundConnection() - - // TODO: proper error handling when initializing connections. - // We can report permanent errors, e.g., invalid settings. - return nil -} - -func (c *Connection) LastConnectError() error { - errPtr := (*error)(atomic.LoadPointer(&c.lastConnectErrPtr)) - if errPtr == nil { - return nil - } - return *errPtr -} - -func (c *Connection) saveLastConnectError(err error) { - var errPtr *error - if err != nil { - errPtr = &err - } - atomic.StorePointer(&c.lastConnectErrPtr, unsafe.Pointer(errPtr)) -} - -func (c *Connection) SetStateDisconnected(err error) { - c.saveLastConnectError(err) - select { - case c.disconnectedCh <- true: - default: - } - c.newConnectionHandler(nil) -} - -func (c *Connection) setStateConnected() { - c.saveLastConnectError(nil) -} - -func (c *Connection) Connected() bool { - return c.LastConnectError() == nil -} - -const defaultConnReattemptPeriod = 10 * time.Second - -func (c *Connection) indefiniteBackgroundConnection() { - defer func() { - c.closeBackgroundConnectionDoneCh(c.backgroundConnectionDoneCh) - }() - - connReattemptPeriod := c.cfg.ReconnectionPeriod - if connReattemptPeriod <= 0 { - connReattemptPeriod = defaultConnReattemptPeriod - } - - // No strong seeding required, nano time can - // already help with pseudo uniqueness. - rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) - - // maxJitterNanos: 70% of the connectionReattemptPeriod - maxJitterNanos := int64(0.7 * float64(connReattemptPeriod)) - - for { - // Otherwise these will be the normal scenarios to enable - // reconnection if we trip out. - // 1. If we've stopped, return entirely - // 2. Otherwise block until we are disconnected, and - // then retry connecting - select { - case <-c.stopCh: - return - - case <-c.disconnectedCh: - // Quickly check if we haven't stopped at the - // same time. 
- select { - case <-c.stopCh: - return - - default: - } - - // Normal scenario that we'll wait for - } - - if err := c.connect(context.Background()); err == nil { - c.setStateConnected() - } else { - // this code is unreachable in most cases - // c.connect does not establish Connection - c.SetStateDisconnected(err) - } - - // Apply some jitter to avoid lockstep retrials of other - // collector-exporters. Lockstep retrials could result in an - // innocent DDOS, by clogging the machine's resources and network. - jitter := time.Duration(rng.Int63n(maxJitterNanos)) - select { - case <-c.stopCh: - return - case <-time.After(connReattemptPeriod + jitter): - } - } -} - -func (c *Connection) connect(ctx context.Context) error { - cc, err := c.dialToCollector(ctx) - if err != nil { - return err - } - c.setConnection(cc) - c.newConnectionHandler(cc) - return nil -} - -// setConnection sets cc as the client Connection and returns true if -// the Connection state changed. -func (c *Connection) setConnection(cc *grpc.ClientConn) bool { - c.mu.Lock() - defer c.mu.Unlock() - - // If previous clientConn is same as the current then just return. - // This doesn't happen right now as this func is only called with new ClientConn. - // It is more about future-proofing. - if c.cc == cc { - return false - } - - // If the previous clientConn was non-nil, close it - if c.cc != nil { - _ = c.cc.Close() - } - c.cc = cc - return true -} - -func (c *Connection) dialToCollector(ctx context.Context) (*grpc.ClientConn, error) { - if c.cfg.GRPCConn != nil { - return c.cfg.GRPCConn, nil - } - - dialOpts := []grpc.DialOption{} - if c.cfg.ServiceConfig != "" { - dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(c.cfg.ServiceConfig)) - } - if c.SCfg.GRPCCredentials != nil { - dialOpts = append(dialOpts, grpc.WithTransportCredentials(c.SCfg.GRPCCredentials)) - } else if c.SCfg.Insecure { - dialOpts = append(dialOpts, grpc.WithInsecure()) - } - if c.SCfg.Compression == otlpconfig.GzipCompression { - dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) - } - if len(c.cfg.DialOptions) != 0 { - dialOpts = append(dialOpts, c.cfg.DialOptions...) - } - - ctx, cancel := c.ContextWithStop(ctx) - defer cancel() - ctx = c.ContextWithMetadata(ctx) - return grpc.DialContext(ctx, c.SCfg.Endpoint, dialOpts...) -} - -func (c *Connection) ContextWithMetadata(ctx context.Context) context.Context { - if c.metadata.Len() > 0 { - return metadata.NewOutgoingContext(ctx, c.metadata) - } - return ctx -} - -func (c *Connection) Shutdown(ctx context.Context) error { - close(c.stopCh) - // Ensure that the backgroundConnector returns - select { - case <-c.backgroundConnectionDoneCh: - case <-ctx.Done(): - return ctx.Err() - } - - c.mu.Lock() - cc := c.cc - c.cc = nil - c.mu.Unlock() - - if cc != nil { - return cc.Close() - } - - return nil -} - -func (c *Connection) ContextWithStop(ctx context.Context) (context.Context, context.CancelFunc) { - // Unify the parent context Done signal with the Connection's - // stop channel. - ctx, cancel := context.WithCancel(ctx) - go func(ctx context.Context, cancel context.CancelFunc) { - select { - case <-ctx.Done(): - // Nothing to do, either cancelled or deadline - // happened. 
- case <-c.stopCh: - cancel() - } - }(ctx, cancel) - return ctx, cancel -} - -func (c *Connection) DoRequest(ctx context.Context, fn func(context.Context) error) error { - ctx, cancel := c.ContextWithStop(ctx) - defer cancel() - return c.requestFunc(ctx, func(ctx context.Context) error { - err := fn(ctx) - // nil is converted to OK. - if status.Code(err) == codes.OK { - // Success. - return nil - } - return err - }) -} - -// evaluate returns if err is retry-able and a duration to wait for if an -// explicit throttle time is included in err. -func evaluate(err error) (bool, time.Duration) { - s := status.Convert(err) - switch s.Code() { - case codes.Canceled, - codes.DeadlineExceeded, - codes.ResourceExhausted, - codes.Aborted, - codes.OutOfRange, - codes.Unavailable, - codes.DataLoss: - return true, throttleDelay(s) - } - - // Not a retry-able error. - return false, 0 -} - -// throttleDelay returns a duration to wait for if an explicit throttle time -// is included in the response status. -func throttleDelay(status *status.Status) time.Duration { - for _, detail := range status.Details() { - if t, ok := detail.(*errdetails.RetryInfo); ok { - return t.RetryDelay.AsDuration() - } - } - return 0 -} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go index 352a88206da..2d1937beda2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go @@ -20,7 +20,7 @@ import ( "io/ioutil" "net/url" "os" - "regexp" + "path" "strconv" "strings" "time" @@ -28,24 +28,17 @@ import ( "go.opentelemetry.io/otel" ) -var httpSchemeRegexp = regexp.MustCompile(`(?i)^(http://|https://)`) +var DefaultEnvOptionsReader = EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: ioutil.ReadFile, +} func ApplyGRPCEnvConfigs(cfg *Config) { - e := EnvOptionsReader{ - GetEnv: os.Getenv, - ReadFile: ioutil.ReadFile, - } - - e.ApplyGRPCEnvConfigs(cfg) + DefaultEnvOptionsReader.ApplyGRPCEnvConfigs(cfg) } func ApplyHTTPEnvConfigs(cfg *Config) { - e := EnvOptionsReader{ - GetEnv: os.Getenv, - ReadFile: ioutil.ReadFile, - } - - e.ApplyHTTPEnvConfigs(cfg) + DefaultEnvOptionsReader.ApplyHTTPEnvConfigs(cfg) } type EnvOptionsReader struct { @@ -71,23 +64,55 @@ func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { var opts []GenericOption // Endpoint - if v, ok := e.getEnvValue("ENDPOINT"); ok { - if isInsecureEndpoint(v) { - opts = append(opts, WithInsecure()) - } else { - opts = append(opts, WithSecure()) - } - - opts = append(opts, WithEndpoint(trimSchema(v))) - } if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok { - if isInsecureEndpoint(v) { - opts = append(opts, WithInsecure()) - } else { - opts = append(opts, WithSecure()) + u, err := url.Parse(v) + // Ignore invalid values. + if err == nil { + // This is used to set the scheme for OTLP/HTTP. + if insecureSchema(u.Scheme) { + opts = append(opts, WithInsecure()) + } else { + opts = append(opts, WithSecure()) + } + opts = append(opts, newSplitOption(func(cfg *Config) { + cfg.Traces.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. 
+ path := u.Path + if path == "" { + path = "/" + } + cfg.Traces.URLPath = path + }, func(cfg *Config) { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + })) + } + } else if v, ok = e.getEnvValue("ENDPOINT"); ok { + u, err := url.Parse(v) + // Ignore invalid values. + if err == nil { + // This is used to set the scheme for OTLP/HTTP. + if insecureSchema(u.Scheme) { + opts = append(opts, WithInsecure()) + } else { + opts = append(opts, WithSecure()) + } + opts = append(opts, newSplitOption(func(cfg *Config) { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + }, func(cfg *Config) { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + })) } - - opts = append(opts, WithEndpoint(trimSchema(v))) } // Certificate File @@ -136,12 +161,13 @@ func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { return opts } -func isInsecureEndpoint(endpoint string) bool { - return strings.HasPrefix(strings.ToLower(endpoint), "http://") || strings.HasPrefix(strings.ToLower(endpoint), "unix://") -} - -func trimSchema(endpoint string) string { - return httpSchemeRegexp.ReplaceAllString(endpoint, "") +func insecureSchema(schema string) bool { + switch strings.ToLower(schema) { + case "http", "unix": + return true + default: + return false + } } // getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go index 8e21250c674..47c0344c62c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go @@ -20,9 +20,11 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding/gzip" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ) const ( @@ -76,6 +78,46 @@ func NewDefaultConfig() Config { return c } +// NewGRPCConfig returns a new Config with all settings applied from opts and +// any unset setting using the default gRPC config values. +func NewGRPCConfig(opts ...GRPCOption) Config { + cfg := NewDefaultConfig() + ApplyGRPCEnvConfigs(&cfg) + for _, opt := range opts { + opt.ApplyGRPCOption(&cfg) + } + + if cfg.ServiceConfig != "" { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) + } + // Priroritize GRPCCredentials over Insecure (passing both is an error). + if cfg.Traces.GRPCCredentials != nil { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) + } else if cfg.Traces.Insecure { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithInsecure()) + } else { + // Default to using the host's root CA. 
+ creds := credentials.NewTLS(nil) + cfg.Traces.GRPCCredentials = creds + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) + } + if cfg.Traces.Compression == GzipCompression { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) + } + if len(cfg.DialOptions) != 0 { + cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...) + } + if cfg.ReconnectionPeriod != 0 { + p := grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: cfg.ReconnectionPeriod, + } + cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) + } + + return cfg +} + type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index 517411b2403..d9086a390de 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -16,9 +16,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr import ( "go.opentelemetry.io/otel/attribute" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" - "go.opentelemetry.io/otel/sdk/resource" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 25d3c1ba4b7..6246b17f578 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -15,9 +15,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - commonpb "go.opentelemetry.io/proto/otlp/common/v1" - "go.opentelemetry.io/otel/sdk/instrumentation" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) func InstrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go index c2b17a6b288..05a1f78adbc 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -15,9 +15,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" - "go.opentelemetry.io/otel/sdk/resource" + resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" ) // Resource transforms a Resource into an OTLP Resource. 
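
Editor's note, not part of the diff: the new otlpconfig.NewGRPCConfig above centralizes how exporter options become gRPC dial options (explicit transport credentials take precedence over WithInsecure, otherwise the host root CAs via credentials.NewTLS(nil); gzip is wired through grpc.UseCompressor). A minimal, hedged sketch of how a caller would exercise that precedence is below; the CA file path and collector endpoint are hypothetical placeholders, everything else uses APIs that appear in this diff.

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical CA bundle path; any grpc TransportCredentials source works.
	creds, err := credentials.NewClientTLSFromFile("/etc/crio/otlp-ca.crt", "")
	if err != nil {
		log.Fatal(err)
	}

	// Per the NewGRPCConfig precedence in the hunk above: explicit credentials
	// win over WithInsecure; with neither, credentials.NewTLS(nil) (host root
	// CAs) is used. WithCompressor("gzip") maps onto the gzip call option.
	client := otlptracegrpc.NewClient(
		otlptracegrpc.WithEndpoint("collector.example:4317"), // hypothetical endpoint
		otlptracegrpc.WithTLSCredentials(creds),
		otlptracegrpc.WithCompressor("gzip"),
		otlptracegrpc.WithTimeout(10*time.Second),
	)

	if err := client.Start(context.Background()); err != nil {
		log.Fatal(err)
	}
	defer func() {
		// Stop closes the connection only if the client created it.
		_ = client.Stop(context.Background())
	}()
}
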
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index 09dd7081843..2f0f5eacb77 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -17,11 +17,10 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" - "go.opentelemetry.io/otel/sdk/instrumentation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index b17b9291c2c..d709ffa96e1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -17,94 +17,259 @@ package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptra import ( "context" "errors" - "fmt" "sync" + "time" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" - + "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { - connection *connection.Connection + endpoint string + dialOpts []grpc.DialOption + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc - lock sync.Mutex - tracesClient coltracepb.TraceServiceClient + // stopCtx is used as a parent context for all exports. Therefore, when it + // is canceled with the stopFunc all exports are canceled. + stopCtx context.Context + // stopFunc cancels stopCtx, stopping any active exports. + stopFunc context.CancelFunc + + // ourConn keeps track of where conn was created: true if created here on + // Start, or false if passed with an option. This is important on Shutdown + // as the conn should only be closed if created here on start. Otherwise, + // it is up to the processes that passed the conn to close it. + ourConn bool + conn *grpc.ClientConn + tscMu sync.RWMutex + tsc coltracepb.TraceServiceClient } +// Compile time check *client implements otlptrace.Client. var _ otlptrace.Client = (*client)(nil) -var ( - errNoClient = errors.New("no client") -) - // NewClient creates a new gRPC trace client. func NewClient(opts ...Option) otlptrace.Client { - cfg := otlpconfig.NewDefaultConfig() - otlpconfig.ApplyGRPCEnvConfigs(&cfg) - for _, opt := range opts { - opt.applyGRPCOption(&cfg) + return newClient(opts...) +} + +func newClient(opts ...Option) *client { + cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) 
+ + ctx, cancel := context.WithCancel(context.Background()) + + c := &client{ + endpoint: cfg.Traces.Endpoint, + exportTimeout: cfg.Traces.Timeout, + requestFunc: cfg.RetryConfig.RequestFunc(retryable), + dialOpts: cfg.DialOptions, + stopCtx: ctx, + stopFunc: cancel, + conn: cfg.GRPCConn, } - c := &client{} - c.connection = connection.NewConnection(cfg, cfg.Traces, c.handleNewConnection) + if len(cfg.Traces.Headers) > 0 { + c.metadata = metadata.New(cfg.Traces.Headers) + } return c } -func (c *client) handleNewConnection(cc *grpc.ClientConn) { - c.lock.Lock() - defer c.lock.Unlock() - if cc != nil { - c.tracesClient = coltracepb.NewTraceServiceClient(cc) - } else { - c.tracesClient = nil +// Start establishes a gRPC connection to the collector. +func (c *client) Start(ctx context.Context) error { + if c.conn == nil { + // If the caller did not provide a ClientConn when the client was + // created, create one using the configuration they did provide. + conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...) + if err != nil { + return err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn } -} -// Start establishes a connection to the collector. -func (c *client) Start(ctx context.Context) error { - return c.connection.StartConnection(ctx) + // The otlptrace.Client interface states this method is called just once, + // so no need to check if already started. + c.tscMu.Lock() + c.tsc = coltracepb.NewTraceServiceClient(c.conn) + c.tscMu.Unlock() + + return nil } -// Stop shuts down the connection to the collector. +var errAlreadyStopped = errors.New("the client is already stopped") + +// Stop shuts down the client. +// +// Any active connections to a remote endpoint are closed if they were created +// by the client. Any gRPC connection passed during creation using +// WithGRPCConn will not be closed. It is the caller's responsibility to +// handle cleanup of that resource. +// +// This method synchronizes with the UploadTraces method of the client. It +// will wait for any active calls to that method to complete unimpeded, or it +// will cancel any active calls if ctx expires. If ctx expires, the context +// error will be forwarded as the returned error. All client held resources +// will still be released in this situation. +// +// If the client has already stopped, an error will be returned describing +// this. func (c *client) Stop(ctx context.Context) error { - return c.connection.Shutdown(ctx) + // Acquire the c.tscMu lock within the ctx lifetime. + acquired := make(chan struct{}) + go func() { + c.tscMu.Lock() + close(acquired) + }() + var err error + select { + case <-ctx.Done(): + // The Stop timeout is reached. Kill any remaining exports to force + // the clear of the lock and save the timeout error to return and + // signal the shutdown timed out before cleanly stopping. + c.stopFunc() + err = ctx.Err() + + // To ensure the client is not left in a dirty state c.tsc needs to be + // set to nil. To avoid the race condition when doing this, ensure + // that all the exports are killed (initiated by c.stopFunc). + <-acquired + case <-acquired: + } + // Hold the tscMu lock for the rest of the function to ensure no new + // exports are started. + defer c.tscMu.Unlock() + + // The otlptrace.Client interface states this method is called only + // once, but there is no guarantee it is called after Start. 
Ensure the + // client is started before doing anything and let the called know if they + // made a mistake. + if c.tsc == nil { + return errAlreadyStopped + } + + // Clear c.tsc to signal the client is stopped. + c.tsc = nil + + if c.ourConn { + closeErr := c.conn.Close() + // A context timeout error takes precedence over this error. + if err == nil && closeErr != nil { + err = closeErr + } + } + return err } -// UploadTraces sends a batch of spans to the collector. +var errShutdown = errors.New("the client is shutdown") + +// UploadTraces sends a batch of spans. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the client was created with. func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { - if !c.connection.Connected() { - return fmt.Errorf("traces exporter is disconnected from the server %s: %w", c.connection.SCfg.Endpoint, c.connection.LastConnectError()) + // Hold a read lock to ensure a shut down initiated after this starts does + // not abandon the export. This read lock acquire has less priority than a + // write lock acquire (i.e. Stop), meaning if the client is shutting down + // this will come after the shut down. + c.tscMu.RLock() + defer c.tscMu.RUnlock() + + if c.tsc == nil { + return errShutdown } - ctx, cancel := c.connection.ContextWithStop(ctx) + ctx, cancel := c.exportContext(ctx) defer cancel() - ctx, tCancel := context.WithTimeout(ctx, c.connection.SCfg.Timeout) - defer tCancel() - - ctx = c.connection.ContextWithMetadata(ctx) - err := func() error { - c.lock.Lock() - defer c.lock.Unlock() - if c.tracesClient == nil { - return errNoClient - } - return c.connection.DoRequest(ctx, func(ctx context.Context) error { - _, err := c.tracesClient.Export(ctx, &coltracepb.ExportTraceServiceRequest{ - ResourceSpans: protoSpans, - }) - return err + + return c.requestFunc(ctx, func(iCtx context.Context) error { + _, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ + ResourceSpans: protoSpans, }) + // nil is converted to OK. + if status.Code(err) == codes.OK { + // Success. + return nil + } + return err + }) +} + +// exportContext returns a copy of parent with an appropriate deadline and +// cancellation function. +// +// It is the callers responsibility to cancel the returned context once its +// use is complete, via the parent or directly with the returned CancelFunc, to +// ensure all resources are correctly released. +func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { + var ( + ctx context.Context + cancel context.CancelFunc + ) + + if c.exportTimeout > 0 { + ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + } else { + ctx, cancel = context.WithCancel(parent) + } + + if c.metadata.Len() > 0 { + ctx = metadata.NewOutgoingContext(ctx, c.metadata) + } + + // Unify the client stopCtx with the parent. + go func() { + select { + case <-ctx.Done(): + case <-c.stopCtx.Done(): + // Cancel the export as the shutdown has timed out. + cancel() + } }() - if err != nil { - c.connection.SetStateDisconnected(err) + + return ctx, cancel +} + +// retryable returns if err identifies a request that can be retried and a +// duration to wait for if an explicit throttle time is included in err. 
+func retryable(err error) (bool, time.Duration) { + //func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + switch s.Code() { + case codes.Canceled, + codes.DeadlineExceeded, + codes.ResourceExhausted, + codes.Aborted, + codes.OutOfRange, + codes.Unavailable, + codes.DataLoss: + return true, throttleDelay(s) } - return err + + // Not a retry-able error. + return false, 0 +} + +// throttleDelay returns a duration to wait for if an explicit throttle time +// is included in the response status. +func throttleDelay(status *status.Status) time.Duration { + for _, detail := range status.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return t.RetryDelay.AsDuration() + } + } + return 0 } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index 86bfc1dc8a5..0b27a35a8e1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -18,12 +18,12 @@ import ( "fmt" "time" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry" - "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" ) // Option applies an option to the gRPC driver. @@ -31,8 +31,19 @@ type Option interface { applyGRPCOption(*otlpconfig.Config) } -// RetryConfig defines configuration for retrying batches in case of export -// failure using an exponential backoff. +func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { + converted := make([]otlpconfig.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying export of span batches that +// failed to be received by the target endpoint. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. type RetryConfig retry.Config type wrappedOption struct { @@ -43,22 +54,28 @@ func (w wrappedOption) applyGRPCOption(cfg *otlpconfig.Config) { w.ApplyGRPCOption(cfg) } -// WithInsecure disables client transport security for the exporter's gRPC connection -// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure -// does. Note, by default, client security is required unless WithInsecure is used. +// WithInsecure disables client transport security for the exporter's gRPC +// connection just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by +// default, client security is required unless WithInsecure is used. +// +// This option has no effect if WithGRPCConn is used. func WithInsecure() Option { return wrappedOption{otlpconfig.WithInsecure()} } -// WithEndpoint allows one to set the endpoint that the exporter will -// connect to the collector on. If unset, it will instead try to use -// connect to DefaultCollectorHost:DefaultCollectorPort. +// WithEndpoint sets the target endpoint the exporter will connect to. If +// unset, localhost:4317 will be used as a default. +// +// This option has no effect if WithGRPCConn is used. 
func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } -// WithReconnectionPeriod allows one to set the delay between next connection attempt -// after failing to connect with the collector. +// WithReconnectionPeriod set the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. func WithReconnectionPeriod(rp time.Duration) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) { cfg.ReconnectionPeriod = rp @@ -75,25 +92,30 @@ func compressorToCompression(compressor string) otlpconfig.Compression { return otlpconfig.NoCompression } -// WithCompressor will set the compressor for the gRPC client to use when sending requests. -// It is the responsibility of the caller to ensure that the compressor set has been registered -// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some -// compressors auto-register on import, such as gzip, which can be registered by calling +// WithCompressor sets the compressor for the gRPC client to use when sending +// requests. It is the responsibility of the caller to ensure that the +// compressor set has been registered with google.golang.org/grpc/encoding. +// This can be done by encoding.RegisterCompressor. Some compressors +// auto-register on import, such as gzip, which can be registered by calling // `import _ "google.golang.org/grpc/encoding/gzip"`. +// +// This option has no effect if WithGRPCConn is used. func WithCompressor(compressor string) Option { return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} } -// WithHeaders will send the provided headers with gRPC requests. +// WithHeaders will send the provided headers with each gRPC requests. func WithHeaders(headers map[string]string) Option { return wrappedOption{otlpconfig.WithHeaders(headers)} } -// WithTLSCredentials allows the connection to use TLS credentials -// when talking to the server. It takes in grpc.TransportCredentials instead -// of say a Certificate file or a tls.Certificate, because the retrieving of -// these credentials can be done in many ways e.g. plain file, in code tls.Config -// or by certificate rotation, so it is up to the caller to decide what to use. +// WithTLSCredentials allows the connection to use TLS credentials when +// talking to the server. It takes in grpc.TransportCredentials instead of say +// a Certificate file or a tls.Certificate, because the retrieving of these +// credentials can be done in many ways e.g. plain file, in code tls.Config or +// by certificate rotation, so it is up to the caller to decide what to use. +// +// This option has no effect if WithGRPCConn is used. func WithTLSCredentials(creds credentials.TransportCredentials) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) { cfg.Traces.GRPCCredentials = creds @@ -101,40 +123,63 @@ func WithTLSCredentials(creds credentials.TransportCredentials) Option { } // WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. func WithServiceConfig(serviceConfig string) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) { cfg.ServiceConfig = serviceConfig })} } -// WithDialOption opens support to any grpc.DialOption to be used. 
If it conflicts -// with some other configuration the GRPC specified via the collector the ones here will -// take preference since they are set last. +// WithDialOption sets explicit grpc.DialOptions to use when making a +// connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// +// This option has no effect if WithGRPCConn is used. func WithDialOption(opts ...grpc.DialOption) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) { cfg.DialOptions = opts })} } -// WithGRPCConn allows reusing existing gRPC connection when it has already been -// established for other services. When set, other dial options will be ignored. +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the callers responsibility to close the passed conn. The client +// Shutdown method will not close this connection. func WithGRPCConn(conn *grpc.ClientConn) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) { cfg.GRPCConn = conn })} } -// WithTimeout tells the driver the max waiting time for the backend to process -// each spans batch. If unset, the default will be 10 seconds. +// WithTimeout sets the max amount of time a client will attempt to export a +// batch of spans. This takes precedence over any retry settings defined with +// WithRetry, once this time limit has been reached the export is abandoned +// and the batch of spans is dropped. +// +// If unset, the default timeout will be set to 10 seconds. func WithTimeout(duration time.Duration) Option { return wrappedOption{otlpconfig.WithTimeout(duration)} } -// WithRetry configures the retry policy for transient errors that may occurs -// when exporting traces. An exponential back-off algorithm is used to ensure -// endpoints are not overwhelmed with retries. If unset, the default retry -// policy will retry after 5 seconds and increase exponentially after each -// error for a total of 1 minute. +// WithRetry sets the retry policy for transient retryable errors that may be +// returned by the target endpoint when exporting a batch of spans. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response. That time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. 
func WithRetry(settings RetryConfig) Option { return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 6bb2a2805aa..eb0ecd2cf91 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "sync" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -36,26 +37,12 @@ type Resource struct { } var ( - emptyResource Resource - - defaultResource = func(r *Resource, err error) *Resource { - if err != nil { - otel.Handle(err) - } - return r - }( - Detect( - context.Background(), - defaultServiceNameDetector{}, - fromEnv{}, - telemetrySDK{}, - ), - ) + emptyResource Resource + defaultResource *Resource + defaultResourceOnce sync.Once ) -var ( - errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") -) +var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") // New returns a Resource combined from the user-provided detectors. func New(ctx context.Context, opts ...Option) (*Resource, error) { @@ -211,6 +198,22 @@ func Empty() *Resource { // Default returns an instance of Resource with a default // "service.name" and OpenTelemetrySDK attributes. func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultResource, err = Detect( + context.Background(), + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &emptyResource + } + }) return defaultResource } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 46f66049edd..75f519a67b4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -22,6 +22,7 @@ import ( "time" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/trace" ) @@ -224,6 +225,7 @@ func WithBlocking() BatchSpanProcessorOption { // exportSpans is a subroutine of processing and draining the queue. func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { + bsp.timer.Reset(bsp.o.BatchTimeout) bsp.batchMutex.Lock() @@ -236,6 +238,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { + global.Debug("exporting spans", "count", len(bsp.batch)) err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. 
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 3a32445bc75..47ff5fad1e2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -21,10 +21,9 @@ import ( "sync/atomic" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index a12c68b40a9..41a68b58551 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -25,12 +25,11 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.opentelemetry.io/otel/trace" ) // ReadOnlySpan allows reading information from the data structure underlying a diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index afd4b5b5d5b..1177c729a8f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -18,9 +18,8 @@ import ( "context" "time" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/trace" ) type tracer struct { diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index 8298228d33e..de75b592ca9 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -48,7 +48,7 @@ type AnyValue struct { unknownFields protoimpl.UnknownFields // The value is one of the listed fields. It is valid for all values to be unspecified - // in which case this AnyValue is considered to be "null". + // in which case this AnyValue is considered to be "empty". // // Types that are assignable to Value: // *AnyValue_StringValue diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index a37dbe998a3..a45dac3c660 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -112,7 +112,7 @@ func (x Span_SpanKind) Number() protoreflect.EnumNumber { // Deprecated: Use Span_SpanKind.Descriptor instead. func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2, 0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} } type Status_DeprecatedStatusCode int32 @@ -203,7 +203,7 @@ func (x Status_DeprecatedStatusCode) Number() protoreflect.EnumNumber { // Deprecated: Use Status_DeprecatedStatusCode.Descriptor instead. 
func (Status_DeprecatedStatusCode) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} } // For the semantics of status codes see @@ -258,7 +258,69 @@ func (x Status_StatusCode) Number() protoreflect.EnumNumber { // Deprecated: Use Status_StatusCode.Descriptor instead. func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 1} +} + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type TracesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *TracesData) Reset() { + *x = TracesData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracesData) ProtoMessage() {} + +func (x *TracesData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracesData.ProtoReflect.Descriptor instead. +func (*TracesData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +func (x *TracesData) GetResourceSpans() []*ResourceSpans { + if x != nil { + return x.ResourceSpans + } + return nil } // A collection of InstrumentationLibrarySpans from a Resource. 
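
Editor's note, not part of the diff: the newly added TracesData message is what shifts every later message index in this generated file by one. It simply wraps a slice of ResourceSpans so OTLP trace payloads can be stored or embedded outside the collector RPC envelope. A minimal, hedged sketch of marshaling it (empty here for brevity) follows; it assumes only the generated type shown above and the standard protobuf runtime.

package main

import (
	"fmt"
	"log"

	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Wrap already-collected ResourceSpans (none in this sketch) in the new
	// top-level TracesData message and marshal it for persistent storage.
	td := &tracepb.TracesData{
		ResourceSpans: []*tracepb.ResourceSpans{},
	}
	raw, err := proto.Marshal(td)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d bytes\n", len(raw))
}
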
@@ -281,7 +343,7 @@ type ResourceSpans struct { func (x *ResourceSpans) Reset() { *x = ResourceSpans{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -294,7 +356,7 @@ func (x *ResourceSpans) String() string { func (*ResourceSpans) ProtoMessage() {} func (x *ResourceSpans) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -307,7 +369,7 @@ func (x *ResourceSpans) ProtoReflect() protoreflect.Message { // Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead. func (*ResourceSpans) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1} } func (x *ResourceSpans) GetResource() *v1.Resource { @@ -350,7 +412,7 @@ type InstrumentationLibrarySpans struct { func (x *InstrumentationLibrarySpans) Reset() { *x = InstrumentationLibrarySpans{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -363,7 +425,7 @@ func (x *InstrumentationLibrarySpans) String() string { func (*InstrumentationLibrarySpans) ProtoMessage() {} func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -376,7 +438,7 @@ func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { // Deprecated: Use InstrumentationLibrarySpans.ProtoReflect.Descriptor instead. func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} } func (x *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { @@ -447,9 +509,7 @@ type Span struct { // This makes it easier to correlate spans in different traces. // // This field is semantically required to be set to non-empty string. - // When null or empty string received - receiver may use string "name" - // as a replacement. There might be smarted algorithms implemented by - // receiver to fix the empty span name. + // Empty value is equivalent to an unknown span name. // // This field is required. 
Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` @@ -505,7 +565,7 @@ type Span struct { func (x *Span) Reset() { *x = Span{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -518,7 +578,7 @@ func (x *Span) String() string { func (*Span) ProtoMessage() {} func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -531,7 +591,7 @@ func (x *Span) ProtoReflect() protoreflect.Message { // Deprecated: Use Span.ProtoReflect.Descriptor instead. func (*Span) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} } func (x *Span) GetTraceId() []byte { @@ -664,7 +724,7 @@ type Status struct { func (x *Status) Reset() { *x = Status{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -677,7 +737,7 @@ func (x *Status) String() string { func (*Status) ProtoMessage() {} func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -690,7 +750,7 @@ func (x *Status) ProtoReflect() protoreflect.Message { // Deprecated: Use Status.ProtoReflect.Descriptor instead. func (*Status) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} } // Deprecated: Do not use. @@ -737,7 +797,7 @@ type Span_Event struct { func (x *Span_Event) Reset() { *x = Span_Event{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -750,7 +810,7 @@ func (x *Span_Event) String() string { func (*Span_Event) ProtoMessage() {} func (x *Span_Event) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -763,7 +823,7 @@ func (x *Span_Event) ProtoReflect() protoreflect.Message { // Deprecated: Use Span_Event.ProtoReflect.Descriptor instead. 
func (*Span_Event) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2, 0} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} } func (x *Span_Event) GetTimeUnixNano() uint64 { @@ -820,7 +880,7 @@ type Span_Link struct { func (x *Span_Link) Reset() { *x = Span_Link{} if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -833,7 +893,7 @@ func (x *Span_Link) String() string { func (*Span_Link) ProtoMessage() {} func (x *Span_Link) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -846,7 +906,7 @@ func (x *Span_Link) ProtoReflect() protoreflect.Message { // Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. func (*Span_Link) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2, 1} + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} } func (x *Span_Link) GetTraceId() []byte { @@ -897,189 +957,195 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x7d, 0x0a, - 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, - 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe6, 0x01, 0x0a, 0x1b, - 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x69, 
- 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, - 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x73, - 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, - 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, - 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, - 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, - 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, - 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, - 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, - 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, - 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x69, 
0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, - 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, - 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, - 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x7d, 0x0a, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, + 0x73, 0x52, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe6, 0x01, + 0x0a, 0x1b, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x6e, 0x0a, + 0x17, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, - 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, - 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 
0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, - 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, - 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, - 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, - 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, - 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, - 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, - 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x38, 0x0a, + 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, + 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, - 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, - 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, - 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, - 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, - 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, - 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, - 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, - 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, - 0x52, 0x10, 0x05, 0x22, 0xfc, 0x07, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x66, - 0x0a, 0x0f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, - 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x44, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, - 0x64, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, - 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0xda, 0x05, 0x0a, 0x14, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1d, - 0x0a, 0x19, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x24, 0x0a, - 0x20, 
0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x28, 0x0a, 0x24, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, - 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x2b, 0x0a, - 0x27, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, - 0x41, 0x52, 0x47, 0x55, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x2c, 0x0a, 0x28, 0x44, 0x45, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, + 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, + 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, + 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, + 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x12, 0x64, 0x72, 0x6f, 
0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, + 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, + 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, + 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, + 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, + 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 
0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, + 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, + 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, + 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, + 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, + 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, + 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, + 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, + 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, 0xfc, 0x07, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x66, 0x0a, 0x0f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, + 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0xda, 0x05, 0x0a, 0x14, 0x44, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x00, 0x12, + 0x24, 0x0a, 0x20, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, + 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x28, 0x0a, 0x24, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, + 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, + 0x2b, 0x0a, 0x27, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 
0x2c, 0x0a, 0x28, + 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x5f, + 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x5f, 0x45, 0x58, - 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x45, 0x50, 0x52, - 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, - 0x44, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x29, - 0x0a, 0x25, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, - 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, 0x2c, 0x0a, 0x28, 0x44, 0x45, 0x50, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, + 0x12, 0x29, 0x0a, 0x25, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, + 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12, 0x2c, 0x0a, 0x28, 0x44, + 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x2d, 0x0a, 0x29, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, - 0x4f, 0x44, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, - 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x2d, 0x0a, 0x29, 0x44, 0x45, 0x50, 0x52, 0x45, - 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, - 0x45, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, - 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x2e, 0x0a, 0x2a, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, - 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, - 0x5f, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x45, + 0x4f, 0x44, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, + 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x2e, 0x0a, 0x2a, 0x44, 0x45, 0x50, 0x52, + 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, + 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x45, 0x50, 0x52, + 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x27, 0x0a, 0x23, + 0x44, 
0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, + 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x28, 0x0a, 0x24, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, + 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, + 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, + 0x29, 0x0a, 0x25, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, + 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x0d, 0x12, 0x26, 0x0a, 0x22, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x0b, 0x12, 0x28, 0x0a, 0x24, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, - 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, - 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x29, 0x0a, - 0x25, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x0d, 0x12, 0x26, 0x0a, 0x22, 0x44, 0x45, 0x50, 0x52, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, + 0x10, 0x0e, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x2a, 0x0a, 0x26, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, - 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, - 0x12, 0x24, 0x0a, 0x20, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x2a, 0x0a, 0x26, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, - 0x5f, 0x55, 0x4e, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, - 0x10, 0x10, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, - 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x10, 0x02, 0x42, 0x58, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 
0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, - 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x55, 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x44, 0x10, 0x10, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, + 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, + 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x10, 0x02, 0x42, 0x58, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1095,40 +1161,42 @@ func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { } var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind (Status_DeprecatedStatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.DeprecatedStatusCode (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode - (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans - (*InstrumentationLibrarySpans)(nil), // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span - (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status - (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event - (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link - (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource - (*v11.InstrumentationLibrary)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationLibrary - (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue + (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans + (*InstrumentationLibrarySpans)(nil), // 5: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans + (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationLibrary)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationLibrary + (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue } var 
file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ - 9, // 0: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource - 4, // 1: opentelemetry.proto.trace.v1.ResourceSpans.instrumentation_library_spans:type_name -> opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - 10, // 2: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.instrumentation_library:type_name -> opentelemetry.proto.common.v1.InstrumentationLibrary - 5, // 3: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.spans:type_name -> opentelemetry.proto.trace.v1.Span - 0, // 4: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind - 11, // 5: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 7, // 6: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event - 8, // 7: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link - 6, // 8: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status - 1, // 9: opentelemetry.proto.trace.v1.Status.deprecated_code:type_name -> opentelemetry.proto.trace.v1.Status.DeprecatedStatusCode - 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode - 11, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 11, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue - 13, // [13:13] is the sub-list for method output_type - 13, // [13:13] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name + 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.instrumentation_library_spans:type_name -> opentelemetry.proto.trace.v1.InstrumentationLibrarySpans + 11, // 3: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.instrumentation_library:type_name -> opentelemetry.proto.common.v1.InstrumentationLibrary + 6, // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind + 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event + 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link + 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status + 1, // 10: opentelemetry.proto.trace.v1.Status.deprecated_code:type_name -> opentelemetry.proto.trace.v1.Status.DeprecatedStatusCode + 2, // 11: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode + 12, // 12: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 12, // 13: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> 
opentelemetry.proto.common.v1.KeyValue + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name } func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() } @@ -1138,7 +1206,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } if !protoimpl.UnsafeEnabled { file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceSpans); i { + switch v := v.(*TracesData); i { case 0: return &v.state case 1: @@ -1150,7 +1218,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrarySpans); i { + switch v := v.(*ResourceSpans); i { case 0: return &v.state case 1: @@ -1162,7 +1230,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span); i { + switch v := v.(*InstrumentationLibrarySpans); i { case 0: return &v.state case 1: @@ -1174,7 +1242,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Status); i { + switch v := v.(*Span); i { case 0: return &v.state case 1: @@ -1186,7 +1254,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span_Event); i { + switch v := v.(*Status); i { case 0: return &v.state case 1: @@ -1198,6 +1266,18 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Span_Link); i { case 0: return &v.state @@ -1216,7 +1296,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc, NumEnums: 3, - NumMessages: 6, + NumMessages: 7, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 916c840b698..6605bf64497 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -14,7 +14,7 @@ import ( "time" ) -// These constants from [PROTOCOL.certkeys] represent the algorithm names +// These constants from [PROTOCOL.certkeys] represent the key algorithm names // for certificate types supported by this package. const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" @@ -27,6 +27,14 @@ const ( CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" ) +// These constants from [PROTOCOL.certkeys] represent additional signature +// algorithm names for certificate types supported by this package. 
+const ( + CertSigAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertSigAlgoRSASHA2256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertSigAlgoRSASHA2512v01 = "rsa-sha2-512-cert-v01@openssh.com" +) + // Certificate types distinguish between host and user // certificates. The values can be set in the CertType field of // Certificate. @@ -423,6 +431,12 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() + if v, ok := authority.(AlgorithmSigner); ok { + if v.PublicKey().Type() == KeyAlgoRSA { + authority = &rsaSigner{v, SigAlgoRSASHA2512} + } + } + sig, err := authority.Sign(rand, c.bytesForSigning()) if err != nil { return err @@ -431,8 +445,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { return nil } +// certAlgoNames includes a mapping from signature algorithms to the +// corresponding certificate signature algorithm. When a key type (such +// as ED25516) is associated with only one algorithm, the KeyAlgo +// constant is used instead of the SigAlgo. var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, + SigAlgoRSA: CertSigAlgoRSAv01, + SigAlgoRSASHA2256: CertSigAlgoRSASHA2256v01, + SigAlgoRSASHA2512: CertSigAlgoRSASHA2512v01, KeyAlgoDSA: CertAlgoDSAv01, KeyAlgoECDSA256: CertAlgoECDSA256v01, KeyAlgoECDSA384: CertAlgoECDSA384v01, diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index bddbde5dbde..f8bdf4984cb 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -394,6 +394,10 @@ func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) } c.incIV() + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + padding := plain[0] if padding < 4 { // padding is a byte, so it automatically satisfies @@ -710,6 +714,10 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([ plain := c.buf[4:contentEnd] s.XORKeyStream(plain, plain) + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + padding := plain[0] if padding < 4 { // padding is a byte, so it automatically satisfies diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index 99f68bd32e9..ba8621a8915 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -115,12 +115,25 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e // verifyHostKeySignature verifies the host key obtained in the key // exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { +func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { sig, rest, ok := parseSignatureBody(result.Signature) if len(rest) > 0 || !ok { return errors.New("ssh: signature parse error") } + // For keys, underlyingAlgo is exactly algo. For certificates, + // we have to look up the underlying key algorithm that SSH + // uses to evaluate signatures. 
+ underlyingAlgo := algo + for sigAlgo, certAlgo := range certAlgoNames { + if certAlgo == algo { + underlyingAlgo = sigAlgo + } + } + if sig.Format != underlyingAlgo { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, underlyingAlgo) + } + return hostKey.Verify(result.H, sig) } diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 290382d059e..5ae2275744c 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -69,11 +69,13 @@ var preferredKexAlgos = []string{ // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, + CertSigAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, + SigAlgoRSASHA2512, SigAlgoRSASHA2256, + SigAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, } @@ -90,16 +92,20 @@ var supportedCompressions = []string{compressionNone} // hashFuncs keeps the mapping of supported algorithms to their respective // hashes needed for signature verification. var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, + SigAlgoRSA: crypto.SHA1, + SigAlgoRSASHA2256: crypto.SHA256, + SigAlgoRSASHA2512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + CertSigAlgoRSAv01: crypto.SHA1, + CertSigAlgoRSASHA2256v01: crypto.SHA256, + CertSigAlgoRSASHA2512v01: crypto.SHA512, + CertAlgoDSAv01: crypto.SHA1, + CertAlgoECDSA256v01: crypto.SHA256, + CertAlgoECDSA384v01: crypto.SHA384, + CertAlgoECDSA521v01: crypto.SHA512, } // unexpectedMessageError results when the SSH message that we received didn't diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 2b10b05a498..05ad49c3647 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -457,8 +457,15 @@ func (t *handshakeTransport) sendKexInit() error { if len(t.hostKeys) > 0 { for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) + algo := k.PublicKey().Type() + switch algo { + case KeyAlgoRSA: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{SigAlgoRSASHA2512, SigAlgoRSASHA2256, SigAlgoRSA}...) + case CertAlgoRSAv01: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, CertSigAlgoRSAv01}...) 
+ default: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + } } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms @@ -614,8 +621,22 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { var hostKey Signer for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { + kt := k.PublicKey().Type() + if kt == algs.hostKey { hostKey = k + } else if signer, ok := k.(AlgorithmSigner); ok { + // Some signature algorithms don't show up as key types + // so we have to manually check for a compatible host key. + switch kt { + case KeyAlgoRSA: + if algs.hostKey == SigAlgoRSASHA2256 || algs.hostKey == SigAlgoRSASHA2512 { + hostKey = &rsaSigner{signer, algs.hostKey} + } + case CertAlgoRSAv01: + if algs.hostKey == CertSigAlgoRSASHA2256v01 || algs.hostKey == CertSigAlgoRSASHA2512v01 { + hostKey = &rsaSigner{signer, certToPrivAlgo(algs.hostKey)} + } + } } } @@ -634,7 +655,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics * return nil, err } - if err := verifyHostKeySignature(hostKey, result); err != nil { + if err := verifyHostKeySignature(hostKey, algs.hostKey, result); err != nil { return nil, err } diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 31f26349a05..c67d3a31cbe 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -939,6 +939,15 @@ func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { return &dsaPrivateKey{key}, nil } +type rsaSigner struct { + AlgorithmSigner + defaultAlgorithm string +} + +func (s *rsaSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.AlgorithmSigner.SignWithAlgorithm(rand, data, s.defaultAlgorithm) +} + type wrappedSigner struct { signer crypto.Signer pubKey PublicKey diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index b6911e8306d..6a58e120892 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -284,7 +284,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + case SigAlgoRSA, SigAlgoRSASHA2256, SigAlgoRSASHA2512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: return true } diff --git a/vendor/modules.txt b/vendor/modules.txt index 594eba59af9..c243febfe4d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -6,14 +6,14 @@ github.com/Azure/go-ansiterm/winterm ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal -# github.com/Microsoft/go-winio v0.5.1 -## explicit; go 1.12 +# github.com/Microsoft/go-winio v0.5.2 +## explicit; go 1.13 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/backuptar github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/security github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.8.23 +# github.com/Microsoft/hcsshim v0.9.2 ## explicit; go 1.13 github.com/Microsoft/hcsshim 
github.com/Microsoft/hcsshim/computestorage @@ -35,6 +35,24 @@ github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/internal/winapi github.com/Microsoft/hcsshim/osversion +# github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f +## explicit; go 1.13 +github.com/ProtonMail/go-crypto/bitcurves +github.com/ProtonMail/go-crypto/brainpool +github.com/ProtonMail/go-crypto/eax +github.com/ProtonMail/go-crypto/internal/byteutil +github.com/ProtonMail/go-crypto/ocb +github.com/ProtonMail/go-crypto/openpgp +github.com/ProtonMail/go-crypto/openpgp/aes/keywrap +github.com/ProtonMail/go-crypto/openpgp/armor +github.com/ProtonMail/go-crypto/openpgp/ecdh +github.com/ProtonMail/go-crypto/openpgp/elgamal +github.com/ProtonMail/go-crypto/openpgp/errors +github.com/ProtonMail/go-crypto/openpgp/internal/algorithm +github.com/ProtonMail/go-crypto/openpgp/internal/ecc +github.com/ProtonMail/go-crypto/openpgp/internal/encoding +github.com/ProtonMail/go-crypto/openpgp/packet +github.com/ProtonMail/go-crypto/openpgp/s2k # github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d ## explicit github.com/StackExchange/wmi @@ -44,6 +62,9 @@ github.com/VividCortex/ewma # github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d ## explicit github.com/acarl005/stripansi +# github.com/acomagu/bufpipe v1.0.3 +## explicit; go 1.12 +github.com/acomagu/bufpipe # github.com/aws/aws-sdk-go v1.38.49 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws @@ -92,13 +113,10 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile -# github.com/bits-and-blooms/bitset v1.2.0 -## explicit; go 1.14 -github.com/bits-and-blooms/bitset # github.com/blang/semver v3.5.1+incompatible ## explicit github.com/blang/semver -# github.com/cenkalti/backoff/v4 v4.1.1 +# github.com/cenkalti/backoff/v4 v4.1.2 ## explicit; go 1.13 github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.1.2 @@ -107,7 +125,7 @@ github.com/cespare/xxhash/v2 # github.com/checkpoint-restore/checkpointctl v0.0.0-20210922093614-c31748bec9f2 ## explicit; go 1.15 github.com/checkpoint-restore/checkpointctl/lib -# github.com/checkpoint-restore/go-criu/v5 v5.1.0 +# github.com/checkpoint-restore/go-criu/v5 v5.3.0 ## explicit; go 1.13 github.com/checkpoint-restore/go-criu/v5 github.com/checkpoint-restore/go-criu/v5/rpc @@ -118,8 +136,8 @@ github.com/cheggaaa/pb/v3/termutil # github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e ## explicit github.com/chzyer/readline -# github.com/cilium/ebpf v0.6.2 -## explicit; go 1.15 +# github.com/cilium/ebpf v0.7.0 +## explicit; go 1.16 github.com/cilium/ebpf github.com/cilium/ebpf/asm github.com/cilium/ebpf/internal @@ -131,14 +149,14 @@ github.com/cilium/ebpf/link github.com/container-orchestrated-devices/container-device-interface/pkg github.com/container-orchestrated-devices/container-device-interface/pkg/cdi github.com/container-orchestrated-devices/container-device-interface/specs-go -# github.com/containerd/cgroups v1.0.2 +# github.com/containerd/cgroups v1.0.3 ## explicit; go 1.16 github.com/containerd/cgroups/stats/v1 -# github.com/containerd/console v1.0.2 +# github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console -# github.com/containerd/containerd v1.5.8 -## explicit; go 1.16 +# github.com/containerd/containerd v1.6.2 +## explicit; go 1.17 github.com/containerd/containerd/api/services/ttrpc/events/v1 
github.com/containerd/containerd/api/types github.com/containerd/containerd/api/types/task @@ -146,6 +164,8 @@ github.com/containerd/containerd/cio github.com/containerd/containerd/defaults github.com/containerd/containerd/errdefs github.com/containerd/containerd/events +github.com/containerd/containerd/events/exchange +github.com/containerd/containerd/filters github.com/containerd/containerd/identifiers github.com/containerd/containerd/log github.com/containerd/containerd/namespaces @@ -154,9 +174,11 @@ github.com/containerd/containerd/pkg/cri/io github.com/containerd/containerd/pkg/cri/util github.com/containerd/containerd/pkg/dialer github.com/containerd/containerd/pkg/ioutil +github.com/containerd/containerd/pkg/shutdown github.com/containerd/containerd/pkg/ttrpcutil github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms +github.com/containerd/containerd/plugin github.com/containerd/containerd/reference/docker github.com/containerd/containerd/runtime/v2/shim github.com/containerd/containerd/runtime/v2/task @@ -172,7 +194,7 @@ github.com/containerd/fifo # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc -# github.com/containerd/stargz-snapshotter/estargz v0.9.0 +# github.com/containerd/stargz-snapshotter/estargz v0.11.3 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -197,7 +219,7 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.1.1 ## explicit; go 1.17 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.23.1 +# github.com/containers/buildah v1.25.1 ## explicit; go 1.13 github.com/containers/buildah github.com/containers/buildah/bind @@ -206,6 +228,9 @@ github.com/containers/buildah/copier github.com/containers/buildah/define github.com/containers/buildah/docker github.com/containers/buildah/imagebuildah +github.com/containers/buildah/internal +github.com/containers/buildah/internal/parse +github.com/containers/buildah/internal/util github.com/containers/buildah/pkg/blobcache github.com/containers/buildah/pkg/chrootuser github.com/containers/buildah/pkg/overlay @@ -213,17 +238,24 @@ github.com/containers/buildah/pkg/parse github.com/containers/buildah/pkg/rusage github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/util -# github.com/containers/common v0.46.1-0.20211001143714-161e078e4c7f +# github.com/containers/common v0.47.5 ## explicit; go 1.15 github.com/containers/common/libimage github.com/containers/common/libimage/manifests +github.com/containers/common/libnetwork/cni +github.com/containers/common/libnetwork/internal/util +github.com/containers/common/libnetwork/netavark +github.com/containers/common/libnetwork/network +github.com/containers/common/libnetwork/types +github.com/containers/common/libnetwork/util github.com/containers/common/pkg/apparmor github.com/containers/common/pkg/apparmor/internal/supported github.com/containers/common/pkg/capabilities +github.com/containers/common/pkg/cgroups github.com/containers/common/pkg/cgroupv2 github.com/containers/common/pkg/chown github.com/containers/common/pkg/config -github.com/containers/common/pkg/defaultnet +github.com/containers/common/pkg/download github.com/containers/common/pkg/filters github.com/containers/common/pkg/manifests github.com/containers/common/pkg/parse @@ -239,11 +271,12 @@ github.com/containers/common/pkg/supplemented github.com/containers/common/pkg/sysinfo 
github.com/containers/common/pkg/timetype github.com/containers/common/pkg/umask +github.com/containers/common/pkg/util github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.17.0 +# github.com/containers/image/v5 v5.20.0 ## explicit; go 1.13 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -257,14 +290,15 @@ github.com/containers/image/v5/docker/reference github.com/containers/image/v5/docker/tarfile github.com/containers/image/v5/image github.com/containers/image/v5/internal/blobinfocache +github.com/containers/image/v5/internal/imagedestination +github.com/containers/image/v5/internal/imagesource github.com/containers/image/v5/internal/iolimits -github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform +github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless github.com/containers/image/v5/internal/streamdigest github.com/containers/image/v5/internal/tmpdir -github.com/containers/image/v5/internal/types github.com/containers/image/v5/internal/uploadreader github.com/containers/image/v5/manifest github.com/containers/image/v5/oci/archive @@ -272,6 +306,7 @@ github.com/containers/image/v5/oci/internal github.com/containers/image/v5/oci/layout github.com/containers/image/v5/openshift github.com/containers/image/v5/ostree +github.com/containers/image/v5/pkg/blobcache github.com/containers/image/v5/pkg/blobinfocache github.com/containers/image/v5/pkg/blobinfocache/boltdb github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize @@ -285,6 +320,7 @@ github.com/containers/image/v5/pkg/shortnames github.com/containers/image/v5/pkg/strslice github.com/containers/image/v5/pkg/sysregistriesv2 github.com/containers/image/v5/pkg/tlsclientconfig +github.com/containers/image/v5/sif github.com/containers/image/v5/signature github.com/containers/image/v5/storage github.com/containers/image/v5/tarball @@ -292,10 +328,10 @@ github.com/containers/image/v5/transports github.com/containers/image/v5/transports/alltransports github.com/containers/image/v5/types github.com/containers/image/v5/version -# github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b +# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a ## explicit github.com/containers/libtrust -# github.com/containers/ocicrypt v1.1.2 +# github.com/containers/ocicrypt v1.1.3 ## explicit; go 1.12 github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher @@ -376,7 +412,7 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.37.0 +# github.com/containers/storage v1.39.0 ## explicit; go 1.14 github.com/containers/storage github.com/containers/storage/drivers @@ -436,8 +472,8 @@ github.com/coreos/go-systemd/v22/sdjournal ## explicit; go 1.12 github.com/cpuguy83/go-md2man github.com/cpuguy83/go-md2man/md2man -# github.com/cpuguy83/go-md2man/v2 v2.0.0 -## explicit; go 1.12 +# github.com/cpuguy83/go-md2man/v2 v2.0.1 +## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man # github.com/creack/pty v1.1.17 ## explicit; go 1.13 @@ -454,7 +490,7 @@ github.com/davecgh/go-spew/spew # github.com/disiqueira/gotree/v3 v3.0.2 ## explicit; go 1.13 
github.com/disiqueira/gotree/v3 -# github.com/docker/distribution v2.7.1+incompatible +# github.com/docker/distribution v2.8.1+incompatible ## explicit github.com/docker/distribution github.com/docker/distribution/digestset @@ -467,7 +503,7 @@ github.com/docker/distribution/registry/client/auth/challenge github.com/docker/distribution/registry/client/transport github.com/docker/distribution/registry/storage/cache github.com/docker/distribution/registry/storage/cache/memory -# github.com/docker/docker v20.10.11+incompatible +# github.com/docker/docker v20.10.14+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -503,11 +539,14 @@ github.com/docker/docker/pkg/system ## explicit; go 1.13 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials -# github.com/docker/go-connections v0.4.0 -## explicit +# github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 +## explicit; go 1.13 github.com/docker/go-connections/nat github.com/docker/go-connections/sockets github.com/docker/go-connections/tlsconfig +# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c +## explicit +github.com/docker/go-events # github.com/docker/go-metrics v0.0.1 ## explicit; go 1.11 github.com/docker/go-metrics @@ -535,13 +574,13 @@ github.com/emirpasic/gods/lists/arraylist github.com/emirpasic/gods/trees github.com/emirpasic/gods/trees/binaryheap github.com/emirpasic/gods/utils -# github.com/fatih/color v1.9.0 +# github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color # github.com/fsnotify/fsnotify v1.5.4 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/fsouza/go-dockerclient v1.7.4 +# github.com/fsouza/go-dockerclient v1.7.10 ## explicit; go 1.16 github.com/fsouza/go-dockerclient # github.com/ghodss/yaml v1.0.0 @@ -553,14 +592,15 @@ github.com/go-git/gcfg github.com/go-git/gcfg/scanner github.com/go-git/gcfg/token github.com/go-git/gcfg/types -# github.com/go-git/go-billy/v5 v5.0.0 +# github.com/go-git/go-billy/v5 v5.3.1 ## explicit; go 1.13 github.com/go-git/go-billy/v5 github.com/go-git/go-billy/v5/helper/chroot github.com/go-git/go-billy/v5/helper/polyfill +github.com/go-git/go-billy/v5/memfs github.com/go-git/go-billy/v5/osfs github.com/go-git/go-billy/v5/util -# github.com/go-git/go-git/v5 v5.2.0 +# github.com/go-git/go-git/v5 v5.4.2 ## explicit; go 1.13 github.com/go-git/go-git/v5 github.com/go-git/go-git/v5/config @@ -604,11 +644,11 @@ github.com/go-git/go-git/v5/utils/merkletrie/filesystem github.com/go-git/go-git/v5/utils/merkletrie/index github.com/go-git/go-git/v5/utils/merkletrie/internal/frame github.com/go-git/go-git/v5/utils/merkletrie/noder -# github.com/go-logr/logr v1.2.1 +# github.com/go-logr/logr v1.2.2 ## explicit; go 1.16 github.com/go-logr/logr github.com/go-logr/logr/funcr -# github.com/go-logr/stdr v1.2.0 +# github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr # github.com/go-ole/go-ole v1.2.4 @@ -655,8 +695,8 @@ github.com/golang/protobuf/ptypes/wrappers github.com/gomarkdown/markdown/ast github.com/gomarkdown/markdown/html github.com/gomarkdown/markdown/parser -# github.com/google/go-cmp v0.5.6 -## explicit; go 1.8 +# github.com/google/go-cmp v0.5.7 +## explicit; go 1.11 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -675,7 +715,7 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# 
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 +# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 ## explicit; go 1.14 github.com/google/pprof/profile # github.com/google/renameio v1.0.1 @@ -705,8 +745,8 @@ github.com/grpc-ecosystem/grpc-gateway/utilities # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap -# github.com/hashicorp/go-cleanhttp v0.5.1 -## explicit +# github.com/hashicorp/go-cleanhttp v0.5.2 +## explicit; go 1.13 github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 @@ -741,8 +781,8 @@ github.com/ishidawataru/sctp # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 ## explicit github.com/jbenet/go-context/io -# github.com/jinzhu/copier v0.3.2 -## explicit; go 1.15 +# github.com/jinzhu/copier v0.3.5 +## explicit; go 1.13 github.com/jinzhu/copier # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 @@ -750,10 +790,10 @@ github.com/jmespath/go-jmespath # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd +# github.com/kevinburke/ssh_config v1.1.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.13.6 +# github.com/klauspost/compress v1.15.1 ## explicit; go 1.15 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -773,7 +813,7 @@ github.com/lithammer/dedent github.com/manifoldco/promptui github.com/manifoldco/promptui/list github.com/manifoldco/promptui/screenbuf -# github.com/mattn/go-colorable v0.1.8 +# github.com/mattn/go-colorable v0.1.12 ## explicit; go 1.13 github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.14 @@ -788,7 +828,7 @@ github.com/mattn/go-shellwords # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/pkcs11 v1.0.3 +# github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible @@ -797,7 +837,7 @@ github.com/mistifyio/go-zfs # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/mapstructure v1.4.2 +# github.com/mitchellh/mapstructure v1.4.3 ## explicit; go 1.14 github.com/mitchellh/mapstructure # github.com/mmarkdown/mmark v2.0.40+incompatible @@ -812,8 +852,8 @@ github.com/moby/spdystream/spdy # github.com/moby/sys/mount v0.2.0 ## explicit; go 1.14 github.com/moby/sys/mount -# github.com/moby/sys/mountinfo v0.4.1 -## explicit; go 1.14 +# github.com/moby/sys/mountinfo v0.6.0 +## explicit; go 1.16 github.com/moby/sys/mountinfo # github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 ## explicit; go 1.13 @@ -828,17 +868,12 @@ github.com/modern-go/reflect2 # github.com/morikuni/aec v1.0.0 ## explicit github.com/morikuni/aec -# github.com/mtrmac/gpgme v0.1.2 -## explicit; go 1.11 -github.com/mtrmac/gpgme # github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 ## explicit github.com/nozzle/throttler # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/onsi/ginkgo v1.16.5 -## explicit; go 1.16 # github.com/onsi/ginkgo/v2 v2.1.4 ## explicit; go 1.18 github.com/onsi/ginkgo/v2 @@ -876,12 +911,12 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# 
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
-## explicit
+# github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
+## explicit; go 1.11
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.0.3
-## explicit; go 1.13
+# github.com/opencontainers/runc v1.1.0
+## explicit; go 1.16
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
 github.com/opencontainers/runc/libcontainer/cgroups/devices
@@ -907,20 +942,20 @@ github.com/opencontainers/runtime-tools/generate
 github.com/opencontainers/runtime-tools/generate/seccomp
 github.com/opencontainers/runtime-tools/specerror
 github.com/opencontainers/runtime-tools/validate
-# github.com/opencontainers/selinux v1.9.1
+# github.com/opencontainers/selinux v1.10.0
 ## explicit; go 1.13
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
-## explicit
+# github.com/openshift/imagebuilder v1.2.3
+## explicit; go 1.16
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/command
 github.com/openshift/imagebuilder/dockerfile/parser
 github.com/openshift/imagebuilder/signal
 github.com/openshift/imagebuilder/strslice
-# github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
+# github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f
 ## explicit
 github.com/ostreedev/ostree-go/pkg/glibobject
 github.com/ostreedev/ostree-go/pkg/otbuiltin
@@ -938,7 +973,10 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.11.0
+# github.com/proglottis/gpgme v0.1.1
+## explicit; go 1.11
+github.com/proglottis/gpgme
+# github.com/prometheus/client_golang v1.11.1
 ## explicit; go 1.13
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
@@ -978,8 +1016,8 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp
 github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp
 github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy
 github.com/rootless-containers/rootlesskit/pkg/port/portutil
-# github.com/russross/blackfriday v1.5.2
-## explicit
+# github.com/russross/blackfriday v1.6.0
+## explicit; go 1.13
 github.com/russross/blackfriday
 # github.com/russross/blackfriday/v2 v2.1.0
 ## explicit
@@ -987,10 +1025,10 @@ github.com/russross/blackfriday/v2
 # github.com/saschagrunert/go-modiff v1.3.0
 ## explicit; go 1.15
 github.com/saschagrunert/go-modiff/pkg/modiff
-# github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
+# github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921
 ## explicit; go 1.14
 github.com/seccomp/libseccomp-golang
-# github.com/sergi/go-diff v1.1.0
+# github.com/sergi/go-diff v1.2.0
 ## explicit; go 1.12
 github.com/sergi/go-diff/diffmatchpatch
 # github.com/shirou/gopsutil/v3 v3.20.12
@@ -1003,8 +1041,8 @@ github.com/sirupsen/logrus
 # github.com/soheilhy/cmux v0.1.5
 ## explicit; go 1.11
 github.com/soheilhy/cmux
-# github.com/spf13/cobra v1.2.1
-## explicit; go 1.14
+# github.com/spf13/cobra v1.4.0
+## explicit; go 1.15
 github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
@@ -1019,9 +1057,12 @@ github.com/spiegel-im-spiegel/go-cvss/v3/metric
 # github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
 ## explicit
 github.com/stefanberger/go-pkcs11uri
-# github.com/stretchr/testify v1.7.0
+# github.com/stretchr/testify v1.7.1
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
+# github.com/sylabs/sif/v2 v2.3.2
+## explicit; go 1.17
+github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit
 github.com/syndtr/gocapability/capability
@@ -1042,7 +1083,7 @@ github.com/urfave/cli/v2
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v7 v7.1.5
+# github.com/vbauerster/mpb/v7 v7.3.2
 ## explicit; go 1.14
 github.com/vbauerster/mpb/v7
 github.com/vbauerster/mpb/v7/cwriter
@@ -1058,10 +1099,10 @@ github.com/vishvananda/netns
 # github.com/xanzy/go-gitlab v0.43.0
 ## explicit; go 1.13
 github.com/xanzy/go-gitlab
-# github.com/xanzy/ssh-agent v0.2.1
-## explicit
+# github.com/xanzy/ssh-agent v0.3.1
+## explicit; go 1.16
 github.com/xanzy/ssh-agent
-# github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b
+# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
 ## explicit
 github.com/xeipuuv/gojsonpointer
 # github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
@@ -1099,18 +1140,19 @@ go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/v1.4.0
 go.opentelemetry.io/otel/semconv/v1.7.0
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0
-## explicit; go 1.15
+# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0
+## explicit; go 1.16
+go.opentelemetry.io/otel/exporters/otlp/internal/retry
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0
+## explicit; go 1.16
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/connection
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.2.0
-## explicit; go 1.15
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0
+## explicit; go 1.16
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
-# go.opentelemetry.io/otel/sdk v1.2.0
-## explicit; go 1.15
+# go.opentelemetry.io/otel/sdk v1.3.0
+## explicit; go 1.16
 go.opentelemetry.io/otel/sdk/instrumentation
 go.opentelemetry.io/otel/sdk/internal
 go.opentelemetry.io/otel/sdk/resource
@@ -1118,13 +1160,13 @@ go.opentelemetry.io/otel/sdk/trace
 # go.opentelemetry.io/otel/trace v1.3.0
 ## explicit; go 1.16
 go.opentelemetry.io/otel/trace
-# go.opentelemetry.io/proto/otlp v0.10.0
+# go.opentelemetry.io/proto/otlp v0.11.0
 ## explicit; go 1.14
 go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
+# golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
 ## explicit; go 1.17
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
@@ -1169,7 +1211,7 @@ golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
+# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 ## explicit; go 1.11
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
@@ -1241,7 +1283,7 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20211005153810-c76a74d43a8e => google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24
+# google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa => google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24
 ## explicit; go 1.11
 google.golang.org/genproto/googleapis/api/httpbody
 google.golang.org/genproto/googleapis/rpc/errdetails
@@ -1353,7 +1395,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.22.2 => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20211207180839-ab69524f795c
+# k8s.io/api v0.22.5 => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/api/admissionregistration/v1
 k8s.io/api/admissionregistration/v1beta1
@@ -1400,7 +1442,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apimachinery v0.22.2 => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211207180839-ab69524f795c
+# k8s.io/apimachinery v0.22.5 => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1452,13 +1494,13 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.20.6 => k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211207180839-ab69524f795c
+# k8s.io/apiserver v0.22.5 => k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/apiserver/pkg/endpoints/responsewriter
 k8s.io/apiserver/pkg/features
 k8s.io/apiserver/pkg/util/feature
 k8s.io/apiserver/pkg/util/wsstream
-# k8s.io/client-go v0.22.2 => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211207180839-ab69524f795c
+# k8s.io/client-go v0.22.5 => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -1682,7 +1724,7 @@ k8s.io/client-go/util/workqueue
 # k8s.io/cloud-provider v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/cloud-provider
-# k8s.io/component-base v0.20.6 => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211207180839-ab69524f795c
+# k8s.io/component-base v0.22.5 => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/component-base/featuregate
 k8s.io/component-base/metrics
@@ -1691,7 +1733,7 @@ k8s.io/component-base/version
 # k8s.io/component-helpers v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20211207180839-ab69524f795c
 ## explicit; go 1.16
 k8s.io/component-helpers/node/util/sysctl
-# k8s.io/cri-api v0.23.0-alpha.3 => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210928072042-90051602459c
+# k8s.io/cri-api v0.23.1 => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20210928072042-90051602459c
 ## explicit; go 1.16
 k8s.io/cri-api/pkg/apis/runtime/v1
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2