diff --git a/go.mod b/go.mod index 88df65f4430..8213b19f7db 100644 --- a/go.mod +++ b/go.mod @@ -14,9 +14,9 @@ require ( github.com/containernetworking/cni v0.8.1 github.com/containernetworking/plugins v0.9.1 github.com/containers/buildah v1.22.3 - github.com/containers/common v0.43.2 + github.com/containers/common v0.45.0 github.com/containers/conmon v2.0.20+incompatible - github.com/containers/image/v5 v5.15.2 + github.com/containers/image/v5 v5.16.0 github.com/containers/ocicrypt v1.1.2 github.com/containers/podman/v3 v3.3.1 github.com/containers/storage v1.36.0 @@ -42,7 +42,7 @@ require ( github.com/onsi/ginkgo v1.16.4 github.com/onsi/gomega v1.16.0 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.0.2-0.20210708142037-083f635f2b04 + github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 github.com/opencontainers/runc v1.0.2 github.com/opencontainers/runtime-spec v1.0.3-0.20210709190330-896175883324 github.com/opencontainers/runtime-tools v0.9.1-0.20210326182921-59cdde06764b diff --git a/go.sum b/go.sum index c274ca4892e..882dc3a5ad4 100644 --- a/go.sum +++ b/go.sum @@ -332,15 +332,15 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD github.com/containers/buildah v1.22.3 h1:RomxwUa24jMcqzXQetpw4wGMfNlNZLhc9qwyoWHblwc= github.com/containers/buildah v1.22.3/go.mod h1:JVXRyx5Rkp5w5jwvaXe45kuHtyoxpERMjXrR45+3Wfg= github.com/containers/common v0.42.1/go.mod h1:AaF3ipZfgezsctDuhzLkq4Vl+LkEy7J74ikh2HSXDsg= -github.com/containers/common v0.43.2 h1:oSP5d5sDrq7OkoqLPVrLpi1LZOAwpTwOZXgPDHfmD0E= -github.com/containers/common v0.43.2/go.mod h1:BAoVyRYlxKZKAYpHcFMdrXlIZyzbJp9NwKTgadTd/Dg= +github.com/containers/common v0.45.0 h1:QdgGp/lq1FF6FfbI/3f0GAKfLrGW8F78SspiS1uQPN8= +github.com/containers/common v0.45.0/go.mod h1:zxv7KjdYddSGoWuLUVp6eSb++Ow1zmSMB2jwxuNB4cU= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/image/v5 v5.10.4/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII= github.com/containers/image/v5 v5.14.0/go.mod h1:SxiBKOcKuT+4yTjD0AskjO+UwFvNcVOJ9qlAw1HNSPU= -github.com/containers/image/v5 v5.15.0/go.mod h1:gzdBcooi6AFdiqfzirUqv90hUyHyI0MMdaqKzACKr2s= -github.com/containers/image/v5 v5.15.2 h1:DKicmVr0h1HGkzs9muoErX+fVbV9sV9W5TyMy5perLE= github.com/containers/image/v5 v5.15.2/go.mod h1:8jejVSzTDfyPwr/HXp9rri34n/vbdavYk6IzTiB3TBw= +github.com/containers/image/v5 v5.16.0 h1:WQcNSzb7+ngS2cfynx0vUwhk+scpgiKlldVcsF8GPbI= +github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= @@ -356,10 +356,9 @@ github.com/containers/psgo v1.5.2/go.mod h1:2ubh0SsreMZjSXW1Hif58JrEcFudQyIy9EzP github.com/containers/storage v1.23.5/go.mod h1:ha26Q6ngehFNhf3AWoXldvAvwI4jFe3ETQAf/CeZPyM= github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ= github.com/containers/storage v1.32.6/go.mod h1:mdB+b89p+jU8zpzLTVXA0gWMmIo0WrkfGMh1R8O2IQw= -github.com/containers/storage v1.33.0/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY= 
github.com/containers/storage v1.33.1/go.mod h1:FUZPF4nJijX8ixdhByZJXf02cvbyLi6dyDwXdIe8QVY= -github.com/containers/storage v1.34.0/go.mod h1:t6I+hTgPU0/tVxQ75vw406wDi/TXwYBqZp4QZV9N7b8= github.com/containers/storage v1.34.1/go.mod h1:FY2TcbfgCLMU4lYoKnlZeZXeH353TOTbpDEA+sAcqAY= +github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20= github.com/containers/storage v1.36.0 h1:OelxllCW19tnNngYuZw2ty/zLabVMG5rSs3KSwO1Lzc= github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8= github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= @@ -829,7 +828,6 @@ github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= @@ -931,8 +929,9 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= @@ -1011,7 +1010,6 @@ github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDs github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1024,8 +1022,8 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20200206005212-79b036d80240/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20210708142037-083f635f2b04 h1:FAcfLZ/aXS6exuOySekrOT/GjKPt6988dxiF/ENj828= -github.com/opencontainers/image-spec v1.0.2-0.20210708142037-083f635f2b04/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 h1:TVzvdjOalkJBNkbpPVMAr4KV9QRf2IjfxdyxwAK78Gs= +github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.0.0-20190425234816-dae70e8efea4/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1055,7 +1053,6 @@ github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwy github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.8.3/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= github.com/opencontainers/selinux v1.8.5 h1:OkT6bMHOQ1JQQO4ihjQ49sj0+wciDcjziSVTRn8VeTA= github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo= @@ -1287,8 +1284,9 @@ github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlI github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= github.com/vbauerster/mpb/v6 v6.0.4/go.mod h1:a/+JT57gqh6Du0Ay5jSR+uBMfXGdlR7VQlGP52fJxLM= -github.com/vbauerster/mpb/v7 v7.0.3 h1:NfX0pHWhlDTev15M/C3qmSTM1EiIjcS+/d6qS6H4FnI= github.com/vbauerster/mpb/v7 v7.0.3/go.mod h1:NXGsfPGx6G2JssqvEcULtDqUrxuuYs4llpv8W6ZUpzk= +github.com/vbauerster/mpb/v7 v7.1.3 h1:VJkiLuuBs/re5SCHLVkYOPYAs+1jagk5QIDHgAXLVVA= +github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= @@ -1636,6 +1634,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55 h1:rw6UNGRMfarCepjI8qOepea/SXwIBVfTKjztZ5gBbq4= golang.org/x/sys 
v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go
index a44f098ad19..42d3690b949 100644
--- a/vendor/github.com/containers/common/libimage/copier.go
+++ b/vendor/github.com/containers/common/libimage/copier.go
@@ -12,6 +12,7 @@ import (
 "github.com/containers/common/pkg/retry"
 "github.com/containers/image/v5/copy"
 "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/pkg/compression"
 "github.com/containers/image/v5/signature"
 storageTransport "github.com/containers/image/v5/storage"
 "github.com/containers/image/v5/types"
@@ -40,6 +41,10 @@ type CopyOptions struct {
 // Allows for customizing the destination reference lookup. This can
 // be used to use custom blob caches.
 DestinationLookupReferenceFunc LookupReferenceFunc
+ // CompressionFormat is the format to use for the compression of the blobs
+ CompressionFormat *compression.Algorithm
+ // CompressionLevel specifies what compression level is used
+ CompressionLevel *int
 // containers-auth.json(5) file to use when authenticating against
 // container registries.
@@ -65,6 +70,8 @@ type CopyOptions struct {
 // types. Short forms (e.g., oci, v2s2) used by some tools are not
 // supported.
 ManifestMIMEType string
+ // Accept uncompressed layers when copying OCI images.
+ OciAcceptUncompressedLayers bool
 // If OciEncryptConfig is non-nil, it indicates that an image should be
 // encrypted. The encryption options are derived from the construction
 // of the EncryptConfig object. Note: During initial encryption process of
@@ -242,6 +249,17 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) {
 c.systemContext.DockerCertPath = options.CertDirPath
 }
+ if options.CompressionFormat != nil {
+ c.systemContext.CompressionFormat = options.CompressionFormat
+ }
+
+ if options.CompressionLevel != nil {
+ c.systemContext.CompressionLevel = options.CompressionLevel
+ }
+
+ // NOTE: for the sake of consistency it's called Oci* in the CopyOptions.
+ c.systemContext.OCIAcceptUncompressedLayers = options.OciAcceptUncompressedLayers
+
 policy, err := signature.DefaultPolicy(c.systemContext)
 if err != nil {
 return nil, err
diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go
index edfd095a015..2cde098468f 100644
--- a/vendor/github.com/containers/common/libimage/disk_usage.go
+++ b/vendor/github.com/containers/common/libimage/disk_usage.go
@@ -52,6 +52,10 @@ func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, error) {
// diskUsageForImage returns the disk-usage statistics for the specified image.
func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) ([]ImageDiskUsage, error) {
+ if err := image.isCorrupted(""); err != nil {
+ return nil, err
+ }
+
 base := ImageDiskUsage{
 ID: image.ID(),
 Created: image.Created(),
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index b4623a87099..8456d5280a8 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -74,7 +74,10 @@ func (i *Image) isCorrupted(name string) error {
 }
 if _, err := ref.NewImage(context.Background(), nil); err != nil {
- return errors.Errorf("Image %s exists in local storage but may be corrupted: %v", name, err)
+ if name == "" {
+ name = i.ID()[:12]
+ }
+ return errors.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err)
 }
 return nil
}
@@ -712,10 +715,18 @@ func (i *Image) Size() (int64, error) {
 return i.runtime.store.ImageSize(i.ID())
}
+// HasDifferentDigestOptions allows for customizing the check whether another
+// (remote) image has a different digest.
+type HasDifferentDigestOptions struct {
+ // containers-auth.json(5) file to use when authenticating against
+ // container registries.
+ AuthFilePath string
+}
+
// HasDifferentDigest returns true if the image specified by `remoteRef` has a
// different digest than the local one. This check can be useful to check for
// updates on remote registries.
-func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageReference) (bool, error) {
+func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageReference, options *HasDifferentDigestOptions) (bool, error) {
 // We need to account for the arch that the image uses. It seems
 // common on ARM to tweak this option to pull the correct image. See
 // github.com/containers/podman/issues/6613.
@@ -735,6 +746,14 @@ func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageRef
 sys.VariantChoice = inspectInfo.Variant
 }
+ if options != nil && options.AuthFilePath != "" {
+ sys.AuthFilePath = options.AuthFilePath
+ }
+
+ return i.hasDifferentDigestWithSystemContext(ctx, remoteRef, sys)
+}
+
+func (i *Image) hasDifferentDigestWithSystemContext(ctx context.Context, remoteRef types.ImageReference, sys *types.SystemContext) (bool, error) {
 remoteImg, err := remoteRef.NewImage(ctx, sys)
 if err != nil {
 return false, err
diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go
index 8712a13fd64..1c322c37e84 100644
--- a/vendor/github.com/containers/common/libimage/pull.go
+++ b/vendor/github.com/containers/common/libimage/pull.go
@@ -561,7 +561,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str
 }
 if pullPolicy == config.PullPolicyNewer && localImage != nil {
- isNewer, err := localImage.HasDifferentDigest(ctx, srcRef)
+ isNewer, err := localImage.hasDifferentDigestWithSystemContext(ctx, srcRef, c.systemContext)
 if err != nil {
 pullErrors = append(pullErrors, err)
 continue
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 26a04dad58e..42461014d67 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -161,8 +161,14 @@ type LookupImageOptions struct {
 // If set, do not look for items/instances in the manifest list that
 // match the current platform but return the manifest list as is.
+ // Only check for a manifest list; return ErrNotAManifestList if none is found.
 lookupManifest bool
+ // If the matching image resolves to a manifest list, return the manifest
+ // list instead of resolving to an image instance; if no manifest list is
+ // found, try resolving the image.
+ ManifestList bool
+
 // If the image resolves to a manifest list, we usually lookup a
 // matching instance and error if none could be found. In this case,
 // just return the manifest list. Required for image removal.
@@ -305,11 +311,14 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
 }
 return nil, err
 }
- if options.lookupManifest {
+ if options.lookupManifest || options.ManifestList {
 if isManifestList {
 return image, nil
 }
- return nil, errors.Wrapf(ErrNotAManifestList, candidate)
+ // Return ErrNotAManifestList if lookupManifest is set; otherwise try resolving the image.
+ if options.lookupManifest {
+ return nil, errors.Wrapf(ErrNotAManifestList, candidate)
+ }
 }
 if isManifestList {
@@ -532,6 +541,11 @@ type RemoveImagesOptions struct {
 // using a removed image. Use RemoveContainerFunc for custom logic.
 // If set, all child images will be removed as well.
 Force bool
+ // LookupManifest will expect all specified names to be manifest lists (no instance lookup).
+ // This allows for removing manifest lists.
+ // By default, RemoveImages will attempt to resolve to a manifest instance matching
+ // the local platform (i.e., os, architecture, variant).
+ LookupManifest bool
 // RemoveContainerFunc allows for custom logic for removing
 // containers using a specific image. By default, all containers in
 // the local containers storage will be removed (if Force is set).
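The new LookupManifest option above is what lets a caller remove a manifest list itself rather than a platform-specific instance. A minimal sketch of a downstream caller, assuming only the RemoveImages API shown in this hunk plus the fact that libimage's RemoveImages reports failures as a slice of errors; the helper name, the Force setting, and the error handling are hypothetical (imports assumed: context, github.com/containers/common/libimage):

// removeManifestList is a hypothetical helper built on the option above: it
// removes a manifest list by name without resolving it to an instance that
// matches the local platform. rt is a previously constructed *libimage.Runtime.
func removeManifestList(ctx context.Context, rt *libimage.Runtime, name string) error {
	opts := &libimage.RemoveImagesOptions{
		Force:          true,
		LookupManifest: true, // treat name as a manifest list; skip instance lookup
	}
	_, errs := rt.RemoveImages(ctx, []string{name}, opts)
	if len(errs) > 0 {
		return errs[0] // surface the first reported error
	}
	return nil
}

Returning a slice of errors (rather than a single one) matches the look-up-one-by-one behavior in the RemoveImages hunk below: lookup failures are collected and reported at the end instead of aborting the whole removal.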
@@ -591,13 +605,22 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem
 toDelete := []string{}
 // Look up images in the local containers storage and fill out
 // toDelete and the deleteMap.
+ switch {
 case len(names) > 0:
+ // prepare lookupOptions
+ var lookupOptions *LookupImageOptions
+ if options.LookupManifest {
+ // Setting LookupManifest to true makes sure we only remove manifests and no referenced images.
+ lookupOptions = &LookupImageOptions{lookupManifest: true}
+ } else {
+ lookupOptions = &LookupImageOptions{returnManifestIfNoInstance: true}
+ }
 // Look up the images one-by-one. That allows for removing
 // images that have been looked up successfully while reporting
 // lookup errors at the end.
 for _, name := range names {
- img, resolvedName, err := r.LookupImage(name, &LookupImageOptions{returnManifestIfNoInstance: true})
+ img, resolvedName, err := r.LookupImage(name, lookupOptions)
 if err != nil {
 appendError(err)
 continue
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index e554bac7077..c1f63577a0e 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -167,7 +167,7 @@ type ContainersConfig struct {
 // RootlessNetworking depicts the "kind" of networking for rootless
 // containers. Valid options are `slirp4netns` and `cni`. Default is
- // `slirp4netns`
+ // `slirp4netns` on Linux, and `cni` on non-Linux OSes.
 RootlessNetworking string `toml:"rootless_networking,omitempty"`
 // SeccompProfile is the seccomp.json profile path which is used as the
@@ -234,6 +234,13 @@ type EngineConfig struct {
 // EventsLogger determines where events should be logged.
 EventsLogger string `toml:"events_logger,omitempty"`
+ // graphRoot internally stores the location of the graph root
+ graphRoot string
+
+ // HelperBinariesDir is a list of directories which are used to search for
+ // helper binaries.
+ HelperBinariesDir []string `toml:"helper_binaries_dir"`
+
 // configuration files. When the same filename is present in
 // multiple directories, the file in the directory listed last in
 // this slice takes precedence.
@@ -328,7 +335,7 @@ type EngineConfig struct {
 // ActiveService index to Destinations added v2.0.3
 ActiveService string `toml:"active_service,omitempty"`
- // Destinations mapped by service Names
+ // ServiceDestinations mapped by service Names
 ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"`
 // RuntimePath is the path to OCI runtime binary for launching containers.
@@ -372,6 +379,10 @@ type EngineConfig struct {
 // containers/storage. As such this is not exposed via the config file.
 StateType RuntimeStateStore `toml:"-"`
+ // ServiceTimeout is the number of seconds to wait without a connection
+ // before the `podman system service` times out and exits
+ ServiceTimeout uint `toml:"service_timeout,omitempty"`
+
 // StaticDir is the path to a persistent directory to store container
 // files.
 StaticDir string `toml:"static_dir,omitempty"`
@@ -380,6 +391,12 @@ type EngineConfig struct {
 // before sending kill signal.
 StopTimeout uint `toml:"stop_timeout,omitempty"`
+ // ImageCopyTmpDir is the default location for storing temporary
+ // container image content. Can be overridden with the TMPDIR
+ // environment variable. If you specify "storage", then the
+ // location of the container/storage tmp directory will be used.
+ ImageCopyTmpDir string `toml:"image_copy_tmp_dir,omitempty"`
+
 // TmpDir is the path to a temporary directory to store per-boot container
 // files. Must be stored in a tmpfs.
 TmpDir string `toml:"tmp_dir,omitempty"`
@@ -778,7 +795,7 @@ func (c *NetworkConfig) Validate() error {
 }
 }
- if stringsEq(c.CNIPluginDirs, cniBinDir) {
+ if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) {
 return nil
 }
@@ -1126,3 +1143,40 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) {
 }
 return "", "", errors.New("no service destination configured")
}
+
+// FindHelperBinary will search for the given binary name in the configured directories.
+// If searchPATH is set to true it will also search in $PATH.
+func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) {
+ for _, path := range c.Engine.HelperBinariesDir {
+ fullpath := filepath.Join(path, name)
+ if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
+ return fullpath, nil
+ }
+ }
+ if searchPATH {
+ return exec.LookPath(name)
+ }
+ if len(c.Engine.HelperBinariesDir) == 0 {
+ return "", errors.Errorf("could not find %q because there are no helper binary directories configured", name)
+ }
+ return "", errors.Errorf("could not find %q in one of %v", name, c.Engine.HelperBinariesDir)
+}
+
+// ImageCopyTmpDir returns the default directory to store temporary image files during copy.
+func (c *Config) ImageCopyTmpDir() (string, error) {
+ if path, found := os.LookupEnv("TMPDIR"); found {
+ return path, nil
+ }
+ switch c.Engine.ImageCopyTmpDir {
+ case "":
+ return "", nil
+ case "storage":
+ return filepath.Join(c.Engine.graphRoot, "tmp"), nil
+ default:
+ if filepath.IsAbs(c.Engine.ImageCopyTmpDir) {
+ return c.Engine.ImageCopyTmpDir, nil
+ }
+ }
+
+ return "", errors.Errorf("invalid image_copy_tmp_dir value %q (relative paths are not accepted)", c.Engine.ImageCopyTmpDir)
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go
index c0722ec7fdb..5abb51f30cd 100644
--- a/vendor/github.com/containers/common/pkg/config/config_darwin.go
+++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go
@@ -15,3 +15,16 @@ func customConfigFile() (string, error) {
 func ifRootlessConfigPath() (string, error) {
 return rootlessConfigPath()
}
+
+var defaultHelperBinariesDir = []string{
+ // Homebrew install paths
+ "/usr/local/opt/podman/libexec",
+ "/opt/homebrew/bin",
+ "/opt/homebrew/opt/podman/libexec",
+ "/usr/local/bin",
+ // default paths
+ "/usr/local/libexec/podman",
+ "/usr/local/lib/podman",
+ "/usr/libexec/podman",
+ "/usr/lib/podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go
index fac9e22835d..da0ae871a81 100644
--- a/vendor/github.com/containers/common/pkg/config/config_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/config_linux.go
@@ -35,3 +35,10 @@ func ifRootlessConfigPath() (string, error) {
 }
 return "", nil
}
+
+var defaultHelperBinariesDir = []string{
+ "/usr/local/libexec/podman",
+ "/usr/local/lib/podman",
+ "/usr/libexec/podman",
+ "/usr/lib/podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go
index 28e8471f2ad..dbe7ba00d60 100644
--- a/vendor/github.com/containers/common/pkg/config/config_windows.go
+++ b/vendor/github.com/containers/common/pkg/config/config_windows.go
@@ -13,3 +13,7 @@
func customConfigFile() (string, error) {
 func ifRootlessConfigPath() (string, error) {
 return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
}
+
+var defaultHelperBinariesDir = []string{
+ "C:\\Program Files\\RedHat\\Podman",
+}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 0068a9a174f..7c72ec79fa3 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -262,7 +262,13 @@ default_sysctls = [
 # Path to directory where CNI plugin binaries are located.
 #
-#cni_plugin_dirs = ["/usr/libexec/cni"]
+#cni_plugin_dirs = [
+# "/usr/local/libexec/cni",
+# "/usr/libexec/cni",
+# "/usr/local/lib/cni",
+# "/usr/lib/cni",
+# "/opt/cni/bin",
+#]
 # The network name of the default CNI network to attach pods to.
 #
@@ -335,6 +341,15 @@ default_sysctls = [
 #
 #events_logger = "journald"
+# A list of directories which are used to search for helper binaries.
+#
+#helper_binaries_dir = [
+# "/usr/local/libexec/podman",
+# "/usr/local/lib/podman",
+# "/usr/libexec/podman",
+# "/usr/lib/podman",
+#]
+
 # Path to OCI hooks directories for automatically executed hooks.
 #
 #hooks_dir = [
@@ -407,7 +422,7 @@ default_sysctls = [
 # Default options to pass to the slirp4netns binary.
 # For example "allow_host_loopback=true"
 #
-#network_cmd_options = []
+#network_cmd_options = ["enable_ipv6=true",]
 # Whether to use chroot instead of pivot_root in the runtime
 #
@@ -436,15 +451,25 @@ default_sysctls = [
 # List of the OCI runtimes that support --format=json. When json is supported
 # engine will use it for reporting nicer errors.
 #
-#runtime_supports_json = ["crun", "runc", "kata", "runsc"]
+#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"]
 # List of the OCI runtimes that support running containers with KVM Separation.
 #
-#runtime_supports_kvm = ["kata"]
+#runtime_supports_kvm = ["kata", "krun"]
 # List of the OCI runtimes that support running containers without cgroups.
 #
-#runtime_supports_nocgroups = ["crun"]
+#runtime_supports_nocgroups = ["crun", "krun"]
+
+# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment
+# variable. If you specify "storage", then the location of the
+# container/storage tmp directory will be used.
+# image_copy_tmp_dir="/var/tmp"
+
+# Number of seconds to wait without a connection
+# before the `podman system service` times out and exits
+#
+#service_timeout = 5
 # Directory for persistent engine files (database, etc)
 # By default, this will be configured relative to where the containers/storage
@@ -483,7 +508,7 @@ default_sysctls = [
 #
 #volume_path = "/var/lib/containers/storage/volumes"
-# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, etc)
+# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc)
 [engine.runtimes]
 #crun = [
 # "/usr/bin/crun",
@@ -526,6 +551,11 @@ default_sysctls = [
 # "/run/current-system/sw/bin/runsc",
 #]
+#krun = [
+# "/usr/bin/krun",
+# "/usr/local/bin/krun",
+#]
+
 [engine.volume_plugins]
 #testplugin = "/run/podman/plugins/test.sock"
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 66531a2ba12..34d17d72c7c 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -76,16 +76,14 @@ var (
 "CAP_SYS_CHROOT",
 }
- cniBinDir = []string{
+ // DefaultCNIPluginDirs is the default list of directories in which to look for CNI plugin binaries.
+ DefaultCNIPluginDirs = []string{
+ "/usr/local/libexec/cni",
 "/usr/libexec/cni",
- "/usr/lib/cni",
 "/usr/local/lib/cni",
+ "/usr/lib/cni",
 "/opt/cni/bin",
 }
-
- // DefaultRootlessNetwork is the kind of of rootless networking
- // for containers
- DefaultRootlessNetwork = "slirp4netns"
)
 const (
@@ -195,19 +193,18 @@ func DefaultConfig() (*Config, error) {
 NoHosts: false,
 PidsLimit: DefaultPidsLimit,
 PidNS: "private",
- RootlessNetworking: DefaultRootlessNetwork,
+ RootlessNetworking: getDefaultRootlessNetwork(),
 ShmSize: DefaultShmSize,
 TZ: "",
 Umask: "0022",
 UTSNS: "private",
- UserNS: "host",
 UserNSSize: DefaultUserNSSize,
 },
 Network: NetworkConfig{
 DefaultNetwork: "podman",
 DefaultSubnet: DefaultSubnet,
 NetworkConfigDir: cniConfig,
- CNIPluginDirs: cniBinDir,
+ CNIPluginDirs: DefaultCNIPluginDirs,
 },
 Engine: *defaultEngineConfig,
 Secrets: defaultSecretConfig(),
@@ -246,9 +243,12 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 logrus.Warnf("Storage configuration is unset - using hardcoded default graph root %q", _defaultGraphRoot)
 storeOpts.GraphRoot = _defaultGraphRoot
 }
+ c.graphRoot = storeOpts.GraphRoot
+ c.ImageCopyTmpDir = "/var/tmp"
 c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod")
 c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
+ c.HelperBinariesDir = defaultHelperBinariesDir
 c.HooksDir = DefaultHooksDirs
 c.ImageDefaultTransport = _defaultTransport
 c.StateType = BoltDBStateStore
@@ -256,8 +256,11 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 c.ImageBuildFormat = "oci"
 c.CgroupManager = defaultCgroupManager()
+ c.ServiceTimeout = uint(5)
 c.StopTimeout = uint(10)
-
+ c.NetworkCmdOptions = []string{
+ "enable_ipv6=true",
+ }
 c.Remote = isRemote()
 c.OCIRuntimes = map[string][]string{
 "crun": {
@@ -298,6 +301,10 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 "/sbin/runsc",
 "/run/current-system/sw/bin/runsc",
 },
+ "krun": {
+ "/usr/bin/krun",
+ "/usr/local/bin/krun",
+ },
 }
 // Needs to be called after populating c.OCIRuntimes
 c.OCIRuntime = c.findRuntime()
@@ -321,9 +328,10 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 "runc",
 "kata",
 "runsc",
+ "krun",
 }
- c.RuntimeSupportsNoCgroups = []string{"crun"}
- c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc"}
+
c.RuntimeSupportsNoCgroups = []string{"crun", "krun"} + c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"} c.InitPath = DefaultInitPath c.NoPivotRoot = false diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go index f61d9ba54b2..c68c0b130fc 100644 --- a/vendor/github.com/containers/common/pkg/config/default_linux.go +++ b/vendor/github.com/containers/common/pkg/config/default_linux.go @@ -13,6 +13,12 @@ const ( oldMaxSize = uint64(1048576) ) +// getDefaultRootlessNetwork returns the default rootless network configuration. +// It is "slirp4netns" for Linux. +func getDefaultRootlessNetwork() string { + return "slirp4netns" +} + // getDefaultProcessLimits returns the nproc for the current process in ulimits format // Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded // to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501 diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index 1ae1dd12cf3..e38fb810de8 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -2,6 +2,12 @@ package config +// getDefaultRootlessNetwork returns the default rootless network configuration. +// It is "cni" for non-Linux OSes (to better support `podman-machine` usecases). +func getDefaultRootlessNetwork() string { + return "cni" +} + // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { return false, nil diff --git a/vendor/github.com/containers/common/pkg/config/util_supported.go b/vendor/github.com/containers/common/pkg/config/util_supported.go index 417e3a37528..33e4a9e8fc2 100644 --- a/vendor/github.com/containers/common/pkg/config/util_supported.go +++ b/vendor/github.com/containers/common/pkg/config/util_supported.go @@ -48,7 +48,7 @@ func getRuntimeDir() (string, error) { } } if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("run-%s", uid)) + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) if err := os.MkdirAll(tmpDir, 0700); err != nil { logrus.Debugf("unable to make temp dir %v", err) } diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index 725e0bfc760..cf333744c53 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -236,6 +236,7 @@ func DefaultProfile() *Seccomp { "madvise", "mbind", "memfd_create", + "memfd_secret", "mincore", "mkdir", "mkdirat", diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index eeb41d5d826..c009134e3cc 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -242,6 +242,7 @@ "madvise", "mbind", "memfd_create", + "memfd_secret", "mincore", "mkdir", "mkdirat", diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index 2e7802369c2..aea983cb15a 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ 
b/vendor/github.com/containers/common/pkg/secrets/secrets.go
@@ -24,8 +24,8 @@ const secretIDLength = 25
 // errInvalidPath indicates that the secrets path is invalid
 var errInvalidPath = errors.New("invalid secrets path")
-// errNoSuchSecret indicates that the secret does not exist
-var errNoSuchSecret = errors.New("no such secret")
+// ErrNoSuchSecret indicates that the secret does not exist
+var ErrNoSuchSecret = errors.New("no such secret")
 // errSecretNameInUse indicates that the secret name is already in use
 var errSecretNameInUse = errors.New("secret name in use")
@@ -152,7 +152,7 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, driv
 newID = newID[0:secretIDLength]
 _, err := s.lookupSecret(newID)
 if err != nil {
- if errors.Cause(err) == errNoSuchSecret {
+ if errors.Cause(err) == ErrNoSuchSecret {
 secr.ID = newID
 break
 } else {
diff --git a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
index 1395d103cb4..0c4929995b2 100644
--- a/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
+++ b/vendor/github.com/containers/common/pkg/secrets/secretsdb.go
@@ -71,14 +71,14 @@ func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err err
 name, id, err = s.getExactNameAndID(nameOrID)
 if err == nil {
 return name, id, nil
- } else if errors.Cause(err) != errNoSuchSecret {
+ } else if errors.Cause(err) != ErrNoSuchSecret {
 return "", "", err
 }
 // ID prefix may have been given, iterate through all IDs.
 // ID and partial ID have a max length of 25, so we return if it's greater than that.
 if len(nameOrID) > secretIDLength {
- return "", "", errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return "", "", errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
 }
 exists := false
 var foundID, foundName string
@@ -96,7 +96,7 @@ func (s *SecretsManager) getNameAndID(nameOrID string) (name, id string, err err
 if exists {
 return foundName, foundID, nil
 }
- return "", "", errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return "", "", errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
}
 // getExactNameAndID takes a secret's name or ID and returns both its name and full ID.
@@ -115,7 +115,7 @@ func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, er
 return name, id, nil
 }
- return "", "", errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return "", "", errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
}
 // exactSecretExists checks if the secret exists, given a name or ID
@@ -123,7 +123,7 @@ func (s *SecretsManager) getExactNameAndID(nameOrID string) (name, id string, er
 func (s *SecretsManager) exactSecretExists(nameOrID string) (bool, error) {
 _, _, err := s.getExactNameAndID(nameOrID)
 if err != nil {
- if errors.Cause(err) == errNoSuchSecret {
+ if errors.Cause(err) == ErrNoSuchSecret {
 return false, nil
 }
 return false, err
@@ -158,7 +158,7 @@ func (s *SecretsManager) lookupSecret(nameOrID string) (*Secret, error) {
 return &secret, nil
 }
- return nil, errors.Wrapf(errNoSuchSecret, "no secret with name or id %q", nameOrID)
+ return nil, errors.Wrapf(ErrNoSuchSecret, "no secret with name or id %q", nameOrID)
}
 // Store creates a new secret in the secrets database.
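Exporting the sentinel (errNoSuchSecret becoming ErrNoSuchSecret) lets code outside the secrets package distinguish "secret does not exist" from real lookup failures. A minimal sketch of a downstream check under that assumption; the helper name is hypothetical, and it relies on the package's existing Lookup method and the errors.Wrapf-style wrapping shown above (imports assumed: github.com/containers/common/pkg/secrets, github.com/pkg/errors):

// secretExists is a hypothetical helper: it reports whether a secret exists,
// treating only the newly exported ErrNoSuchSecret as a non-error outcome.
func secretExists(manager *secrets.SecretsManager, nameOrID string) (bool, error) {
	if _, err := manager.Lookup(nameOrID); err != nil {
		// The package wraps the sentinel with github.com/pkg/errors, so
		// compare against errors.Cause(err) rather than err itself.
		if errors.Cause(err) == secrets.ErrNoSuchSecret {
			return false, nil
		}
		return false, err // storage or database error
	}
	return true, nil
}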
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 572fe9bbd3c..281861a9f13 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.43.2" +const Version = "0.45.0" diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index b4ff8aa1094..e1649ba8e18 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -36,14 +36,6 @@ import ( "golang.org/x/term" ) -type digestingReader struct { - source io.Reader - digester digest.Digester - expectedDigest digest.Digest - validationFailed bool - validationSucceeded bool -} - var ( // ErrDecryptParamsMissing is returned if there is missing decryption parameters ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present") @@ -51,6 +43,10 @@ var ( // maxParallelDownloads is used to limit the maximum number of parallel // downloads. Let's follow Firefox by limiting it to 6. maxParallelDownloads = uint(6) + + // defaultCompressionFormat is used if the destination transport requests + // compression, and the user does not explicitly instruct us to use an algorithm. + defaultCompressionFormat = &compression.Gzip ) // compressionBufferSize is the buffer size used to compress a blob @@ -64,66 +60,22 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, } -// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error -// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. -// (neither is set if EOF is never reached). -func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { - var digester digest.Digester - if err := expectedDigest.Validate(); err != nil { - return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) - } - digestAlgorithm := expectedDigest.Algorithm() - if !digestAlgorithm.Available() { - return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) - } - digester = digestAlgorithm.Digester() - - return &digestingReader{ - source: source, - digester: digester, - expectedDigest: expectedDigest, - validationFailed: false, - }, nil -} - -func (d *digestingReader) Read(p []byte) (int, error) { - n, err := d.source.Read(p) - if n > 0 { - if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { - // Coverage: This should not happen, the hash.Hash interface requires - // d.digest.Write to never return an error, and the io.Writer interface - // requires n2 == len(input) if no error is returned. - return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n) - } - } - if err == io.EOF { - actualDigest := d.digester.Digest() - if actualDigest != d.expectedDigest { - d.validationFailed = true - return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) - } - d.validationSucceeded = true - } - return n, err -} - // copier allows us to keep track of diffID values for blobs, and other // data shared across one or more images in a possible manifest list. 
type copier struct { - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache internalblobinfocache.BlobInfoCache2 - copyInParallel bool - compressionFormat compressiontypes.Algorithm - compressionLevel *int - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig - maxParallelDownloads uint - downloadForeignLayers bool + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache internalblobinfocache.BlobInfoCache2 + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + ociDecryptConfig *encconfig.DecryptConfig + ociEncryptConfig *encconfig.EncryptConfig + concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs + downloadForeignLayers bool } // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -196,7 +148,10 @@ type Options struct { // encrypted if non-nil. If nil, it does not attempt to decrypt an image. OciDecryptConfig *encconfig.DecryptConfig - // MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0. + // A weighted semaphore to limit the amount of concurrently copied layers and configs. Applies to all copy operations using the semaphore. If set, MaxParallelDownloads is ignored. + ConcurrentBlobCopiesSemaphore *semaphore.Weighted + + // MaxParallelDownloads indicates the maximum layers to pull at the same time. Applies to a single copy operation. A reasonable default is used if this is left as 0. Ignored if ConcurrentBlobCopiesSemaphore is set. MaxParallelDownloads uint // When OptimizeDestinationImageAlreadyExists is set, optimize the copy assuming that the destination image already @@ -269,7 +224,6 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if !isTTY(reportWriter) { progressOutput = ioutil.Discard } - copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() c := &copier{ dest: dest, @@ -278,24 +232,38 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, progressOutput: progressOutput, progressInterval: options.ProgressInterval, progress: options.Progress, - copyInParallel: copyInParallel, // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually // we might want to add a separate CommonCtx — or would that be too confusing? blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), ociDecryptConfig: options.OciDecryptConfig, ociEncryptConfig: options.OciEncryptConfig, - maxParallelDownloads: options.MaxParallelDownloads, downloadForeignLayers: options.DownloadForeignLayers, } - // Default to using gzip compression unless specified otherwise. - if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil { - c.compressionFormat = compression.Gzip + + // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
+ if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { + c.concurrentBlobCopiesSemaphore = options.ConcurrentBlobCopiesSemaphore + if c.concurrentBlobCopiesSemaphore == nil { + max := options.MaxParallelDownloads + if max == 0 { + max = maxParallelDownloads + } + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(max)) + } } else { - c.compressionFormat = *options.DestinationCtx.CompressionFormat + c.concurrentBlobCopiesSemaphore = semaphore.NewWeighted(int64(1)) + if options.ConcurrentBlobCopiesSemaphore != nil { + if err := options.ConcurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + return nil, fmt.Errorf("acquiring semaphore for concurrent blob copies: %w", err) + } + defer options.ConcurrentBlobCopiesSemaphore.Release(1) + } } + if options.DestinationCtx != nil { - // Note that the compressionLevel can be nil. + // Note that compressionFormat and compressionLevel can be nil. + c.compressionFormat = options.DestinationCtx.CompressionFormat c.compressionLevel = options.DestinationCtx.CompressionLevel } @@ -904,22 +872,9 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { // copyGroup is used to determine if all layers are copied copyGroup := sync.WaitGroup{} - // copySemaphore is used to limit the number of parallel downloads to - // avoid malicious images causing troubles and to be nice to servers. - var copySemaphore *semaphore.Weighted - if ic.c.copyInParallel { - max := ic.c.maxParallelDownloads - if max == 0 { - max = maxParallelDownloads - } - copySemaphore = semaphore.NewWeighted(int64(max)) - } else { - copySemaphore = semaphore.NewWeighted(int64(1)) - } - data := make([]copyLayerData, numLayers) copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { - defer copySemaphore.Release(1) + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) defer copyGroup.Done() cld := copyLayerData{} if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { @@ -957,17 +912,17 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error { } if err := func() error { // A scope for defer - progressPool, progressCleanup := ic.c.newProgressPool(ctx) - defer func() { - // Wait for all layers to be copied. progressCleanup() must not be called while any of the copyLayerHelpers interact with the progressPool. - copyGroup.Wait() - progressCleanup() - }() + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() for i, srcLayer := range srcInfos { - err = copySemaphore.Acquire(ctx, 1) + err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1) if err != nil { - return errors.Wrapf(err, "Can't acquire semaphore") + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying layer: %w", err) } copyGroup.Add(1) go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference()) @@ -1061,15 +1016,13 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc return man, manifestDigest, nil } -// newProgressPool creates a *mpb.Progress and a cleanup function. -// The caller must eventually call the returned cleanup function after the pool will no longer be updated. 
-func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) { - ctx, cancel := context.WithCancel(ctx) - pool := mpb.NewWithContext(ctx, mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) - return pool, func() { - cancel() - pool.Wait() - } +// newProgressPool creates a *mpb.Progress. +// The caller must eventually call pool.Wait() after the pool will no longer be updated. +// NOTE: Every progress bar created within the progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called. +func (c *copier) newProgressPool() *mpb.Progress { + return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput)) } // customPartialBlobCounter provides a decorator function for the partial blobs retrieval progress bar @@ -1090,6 +1043,9 @@ func customPartialBlobCounter(filler interface{}, wcc ...decor.WC) decor.Decorat // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter // is ioutil.Discard, the progress bar's output will be discarded +// NOTE: Every progress bar created within a progress pool must either successfully +// complete or be aborted, or pool.Wait() will hang. That is typically done +// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called. func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar { // shortDigestLen is the length of the digest used for blobs. const shortDigestLen = 12 @@ -1149,15 +1105,23 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. func (c *copier) copyConfig(ctx context.Context, src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { + if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying config: %w", err) + } + defer c.concurrentBlobCopiesSemaphore.Release(1) + configBlob, err := src.ConfigBlob(ctx) if err != nil { return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest) } destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool, progressCleanup := c.newProgressPool(ctx) - defer progressCleanup() + progressPool := c.newProgressPool() + defer progressPool.Wait() bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done") + defer bar.Abort(false) + destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false) if err != nil { return types.BlobInfo{}, err @@ -1184,7 +1148,7 @@ type diffIDResult struct { // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -// srcRef can be used as an additional hint to the destination during checking whehter a layer can be reused but srcRef can be nil. +// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil. 
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) { // If the srcInfo doesn't contain compression information, try to compute it from the // MediaType, which was either read from a manifest by way of LayerInfos() or constructed @@ -1245,8 +1209,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) - bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") - bar.SetTotal(0, true) + func() { // A scope for defer + bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists") + defer bar.Abort(false) + bar.SetTotal(0, true) + }() // Throw an event that the layer has been skipped if ic.c.progress != nil && ic.c.progressInterval > 0 { @@ -1279,40 +1246,49 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable) imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial) if okSource && okDest && !diffIDIsNeeded { - bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") - - progress := make(chan int64) - terminate := make(chan interface{}) - - defer close(terminate) - defer close(progress) - - proxy := imageSourceSeekableProxy{ - source: imgSource, - progress: progress, - } - go func() { - for { - select { - case written := <-progress: - bar.IncrInt64(written) - case <-terminate: - return + if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer + bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done") + hideProgressBar := true + defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily. 
+ bar.Abort(hideProgressBar) + }() + + progress := make(chan int64) + terminate := make(chan interface{}) + + defer close(terminate) + defer close(progress) + + proxy := imageSourceSeekableProxy{ + source: imgSource, + progress: progress, + } + go func() { + for { + select { + case written := <-progress: + bar.IncrInt64(written) + case <-terminate: + return + } } + }() + + bar.SetTotal(srcInfo.Size, false) + info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) + if err == nil { + bar.SetRefill(srcInfo.Size - bar.Current()) + bar.SetCurrent(srcInfo.Size) + bar.SetTotal(srcInfo.Size, true) + hideProgressBar = false + logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) + return true, info } - }() - - bar.SetTotal(srcInfo.Size, false) - info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache) - if err == nil { - bar.SetRefill(srcInfo.Size - bar.Current()) - bar.SetCurrent(srcInfo.Size) - bar.SetTotal(srcInfo.Size, true) - logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) - return info, cachedDiffID, nil + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{} + }(); reused { + return blobInfo, cachedDiffID, nil } - bar.Abort(true) - logrus.Debugf("Failed to retrieve partial blob: %v", err) } // Fallback: copy the layer, computing the diffID if we need to do so @@ -1322,32 +1298,35 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } defer srcStream.Close() - bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer + bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + defer bar.Abort(false) - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) - if err != nil { - return types.BlobInfo{}, "", err - } + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) + if err != nil { + return types.BlobInfo{}, "", err + } - diffID := cachedDiffID - if diffIDIsNeeded { - select { - case <-ctx.Done(): - return types.BlobInfo{}, "", ctx.Err() - case diffIDResult := <-diffIDChan: - if diffIDResult.err != nil { - return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID") + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. 
+ ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + diffID = diffIDResult.digest } - logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process - // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. - ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) - diffID = diffIDResult.digest } - } - bar.SetTotal(srcInfo.Size, true) - return blobInfo, diffID, nil + bar.SetTotal(srcInfo.Size, true) + return blobInfo, diffID, nil + }() } // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. @@ -1502,7 +1481,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr // short-circuit conditions var inputInfo types.BlobInfo var compressionOperation types.LayerCompression - uploadCompressionFormat := &c.compressionFormat + var uploadCompressionFormat *compressiontypes.Algorithm srcCompressorName := internalblobinfocache.Uncompressed if isCompressed { srcCompressorName = compressionFormat.Name() @@ -1514,14 +1493,19 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr compressionOperation = types.PreserveOriginal inputInfo = srcInfo srcCompressorName = internalblobinfocache.UnknownCompression - uploadCompressorName = internalblobinfocache.UnknownCompression uploadCompressionFormat = nil + uploadCompressorName = internalblobinfocache.UnknownCompression } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { logrus.Debugf("Compressing blob on the fly") compressionOperation = types.Compress pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() + if c.compressionFormat != nil { + uploadCompressionFormat = c.compressionFormat + } else { + uploadCompressionFormat = defaultCompressionFormat + } // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. @@ -1530,7 +1514,8 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr inputInfo.Digest = "" inputInfo.Size = -1 uploadCompressorName = uploadCompressionFormat.Name() - } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() { + } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && + c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. 
logrus.Debugf("Blob will be converted") @@ -1545,6 +1530,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() + uploadCompressionFormat = c.compressionFormat go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter destStream = pipeReader @@ -1562,14 +1548,13 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr destStream = s inputInfo.Digest = "" inputInfo.Size = -1 - uploadCompressorName = internalblobinfocache.Uncompressed uploadCompressionFormat = nil + uploadCompressorName = internalblobinfocache.Uncompressed } else { // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo - uploadCompressorName = srcCompressorName // Remember if the original blob was compressed, and if so how, so that if // LayerInfosForCopy() returned something that differs from what was in the // source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(), @@ -1579,6 +1564,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr } else { uploadCompressionFormat = nil } + uploadCompressorName = srcCompressorName } // === Encrypt the stream for valid mediatypes if ociEncryptConfig provided diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go new file mode 100644 index 00000000000..ccc9110ff90 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -0,0 +1,62 @@ +package copy + +import ( + "hash" + "io" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type digestingReader struct { + source io.Reader + digester digest.Digester + hash hash.Hash + expectedDigest digest.Digest + validationFailed bool + validationSucceeded bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest. +// (neither is set if EOF is never reached). +func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { + var digester digest.Digester + if err := expectedDigest.Validate(); err != nil { + return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) + } + digestAlgorithm := expectedDigest.Algorithm() + if !digestAlgorithm.Available() { + return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + } + digester = digestAlgorithm.Digester() + + return &digestingReader{ + source: source, + digester: digester, + hash: digester.Hash(), + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.hash.Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digest.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, errors.Wrapf(err, "updating digest during verification: %d vs. 
%d", n2, n) + } + } + if err == io.EOF { + actualDigest := d.digester.Digest() + if actualDigest != d.expectedDigest { + d.validationFailed = true + return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) + } + d.validationSucceeded = true + } + return n, err +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index e3280aa2b77..ea20e7c5e41 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -8,6 +8,7 @@ import ( "path/filepath" "runtime" + "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -141,7 +142,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available @@ -163,17 +164,15 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp } }() - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
- size, err := io.Copy(blobFile, tee) + size, err := io.Copy(blobFile, stream) if err != nil { return types.BlobInfo{}, err } - computedDigest := digester.Digest() + blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err @@ -189,7 +188,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp } } - blobPath := d.ref.layerPath(computedDigest) + blobPath := d.ref.layerPath(blobDigest) // need to explicitly close the file, since a rename won't otherwise not work on Windows blobFile.Close() explicitClosed = true @@ -197,7 +196,7 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp return types.BlobInfo{}, err } succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil + return types.BlobInfo{Digest: blobDigest, Size: size}, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 360a7122ef7..80701a76162 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -17,6 +17,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/uploadreader" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" @@ -124,14 +125,14 @@ func (d *dockerImageDestination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - if inputInfo.Digest.String() != "" { + if inputInfo.Digest != "" { // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. 
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. @@ -161,10 +162,12 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, return types.BlobInfo{}, errors.Wrap(err, "determining upload URL") } - digester := digest.Canonical.Digester() + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) sizeCounter := &sizeCounter{} + stream = io.TeeReader(stream, sizeCounter) + uploadLocation, err = func() (*url.URL, error) { // A scope for defer - uploadReader := uploadreader.NewUploadReader(io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))) + uploadReader := uploadreader.NewUploadReader(stream) // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL // returns, so there isn’t a way for the error text to be provided to any of our callers. defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) @@ -186,13 +189,12 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, if err != nil { return types.BlobInfo{}, err } - computedDigest := digester.Digest() + blobDigest := digester.Digest() // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) locationQuery := uploadLocation.Query() - // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 - locationQuery.Set("digest", computedDigest.String()) + locationQuery.Set("digest", blobDigest.String()) uploadLocation.RawQuery = locationQuery.Encode() res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { @@ -204,9 +206,9 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, return types.BlobInfo{}, errors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation) } - logrus.Debugf("Upload of layer %s complete", computedDigest) - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) - return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil + logrus.Debugf("Upload of layer %s complete", blobDigest) + cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) + return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil } // blobExists returns true iff repo contains a blob with digest, and if so, also its size. 
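Note: the putblobdigest helper wired in above recurs, with the same call shape, in the dir, oci, ostree, storage, and tarfile destinations later in this diff (some use the DigestIfUnknown variant, which accepts any digest algorithm). A minimal sketch of that shape, assuming only the vendored API added below in internal/putblobdigest; putBlobSketch and dst are hypothetical names, and the package is internal, so it is importable only from within the containers/image module itself:

import (
	"io"

	"github.com/containers/image/v5/internal/putblobdigest"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// putBlobSketch mirrors the PutBlob implementations in this diff: wrap the incoming
// stream, copy it to its destination, and only then ask for the digest.
func putBlobSketch(stream io.Reader, info types.BlobInfo, dst io.Writer) (digest.Digest, int64, error) {
	// DigestIfCanonicalUnknown reuses info.Digest if it is a canonical (sha256) digest,
	// and otherwise tees the stream through a fresh canonical digester. The returned
	// reader MUST be used in place of the original stream.
	digester, wrapped := putblobdigest.DigestIfCanonicalUnknown(stream, info)
	size, err := io.Copy(dst, wrapped)
	if err != nil {
		return "", -1, err
	}
	// Digest() is only meaningful once the wrapped stream has been read to the end.
	return digester.Digest(), size, nil
}

This is also why the PutBlob contract is tightened throughout this diff: the helper trusts a caller-supplied digest instead of recomputing it, so a provided digest MUST now match the stream contents.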
@@ -485,7 +487,7 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
 		return nil
 	}
 	if instanceDigest == nil {
-		if d.manifestDigest.String() == "" {
+		if d.manifestDigest == "" {
 			// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
 			return errors.Errorf("Unknown manifest digest, can't add signatures")
 		}
diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
index 5dc8e7b1f06..1333cf9e2ef 100644
--- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go
@@ -278,7 +278,78 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
 	return true
 }
 
+// splitHTTP200ResponseToPartial splits a 200 response into multiple streams, as specified by the chunks.
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) {
+	defer close(streams)
+	defer close(errs)
+	currentOffset := uint64(0)
+
+	body = makeBufferedNetworkReader(body, 64, 16384)
+	defer body.Close()
+	for _, c := range chunks {
+		if c.Offset != currentOffset {
+			if c.Offset < currentOffset {
+				errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset)
+				break
+			}
+			toSkip := c.Offset - currentOffset
+			if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+				errs <- err
+				break
+			}
+			currentOffset += toSkip
+		}
+		s := signalCloseReader{
+			closed:        make(chan interface{}),
+			stream:        ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+			consumeStream: true,
+		}
+		streams <- s
+
+		// Wait until the stream is closed before going to the next chunk
+		<-s.closed
+		currentOffset += c.Length
+	}
+}
+
+// handle206Response reads a 206 response and sends each part as a separate ReadCloser to the streams chan.
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) {
+	defer close(streams)
+	defer close(errs)
+	if !strings.HasPrefix(mediaType, "multipart/") {
+		streams <- body
+		return
+	}
+	boundary, found := params["boundary"]
+	if !found {
+		errs <- errors.Errorf("could not find boundary")
+		body.Close()
+		return
+	}
+	buffered := makeBufferedNetworkReader(body, 64, 16384)
+	defer buffered.Close()
+	mr := multipart.NewReader(buffered, boundary)
+	for {
+		p, err := mr.NextPart()
+		if err != nil {
+			if err != io.EOF {
+				errs <- err
+			}
+			return
+		}
+		s := signalCloseReader{
+			closed: make(chan interface{}),
+			stream: p,
+		}
+		streams <- s
+		// NextPart() cannot be called while the current part
+		// is being read, so wait until it is closed
+		<-s.closed
+	}
+}
+
 // GetBlobAt returns a stream for the specified blob.
+// The specified chunks must not overlap, and must be sorted by their offset.
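+// On a 206 (Partial Content) response, the parts returned by the registry are forwarded
+// as-is via handle206Response; on a plain 200 response, the full body is re-split into
+// the requested chunks by splitHTTP200ResponseToPartial.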
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) @@ -305,53 +376,30 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, } return nil, nil, err } - if res.StatusCode != http.StatusPartialContent { - res.Body.Close() - return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) - } - mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type")) - if err != nil { - return nil, nil, err - } + switch res.StatusCode { + case http.StatusOK: + // if the server replied with a 200 status code, convert the full body response to a series of + // streams as it would have been done with 206. + streams := make(chan io.ReadCloser) + errs := make(chan error) + go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) + return streams, errs, nil + case http.StatusPartialContent: + mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } - streams := make(chan io.ReadCloser) - errs := make(chan error) + streams := make(chan io.ReadCloser) + errs := make(chan error) - go func() { - defer close(streams) - defer close(errs) - if !strings.HasPrefix(mediaType, "multipart/") { - streams <- res.Body - return - } - boundary, found := params["boundary"] - if !found { - errs <- errors.Errorf("could not find boundary") - return - } - buffered := makeBufferedNetworkReader(res.Body, 64, 16384) - defer buffered.Close() - mr := multipart.NewReader(buffered, boundary) - for { - p, err := mr.NextPart() - if err != nil { - if err != io.EOF { - errs <- err - } - return - } - s := signalCloseReader{ - Closed: make(chan interface{}), - Stream: p, - } - streams <- s - // NextPart() cannot be called while the current part - // is being read, so wait until it is closed - <-s.Closed - } - }() - return streams, errs, nil + go handle206Response(streams, errs, res.Body, chunks, mediaType, params) + return streams, errs, nil + default: + res.Body.Close() + return nil, nil, errors.Errorf("invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) + } } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
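Note: a minimal consumer sketch for the 200/206 handling above; readBlobChunks, src, and dst are hypothetical, and the chunk offsets/lengths are arbitrary. Each delivered stream has to be read and closed before the next chunk is sent, because both splitHTTP200ResponseToPartial and handle206Response block on the stream’s close signal:

import (
	"context"
	"io"

	internalTypes "github.com/containers/image/v5/internal/types"
	"github.com/containers/image/v5/types"
)

func readBlobChunks(ctx context.Context, src internalTypes.ImageSourceSeekable, info types.BlobInfo, dst io.Writer) error {
	chunks := []internalTypes.ImageSourceChunk{
		{Offset: 0, Length: 1 << 20},
		{Offset: 8 << 20, Length: 1 << 20}, // must not overlap, sorted by offset
	}
	streams, errs, err := src.GetBlobAt(ctx, info, chunks)
	if err != nil {
		return err
	}
	// An error can be sent while chunks are still being delivered, and the sender
	// blocks until it is received, so select over both channels instead of
	// draining streams first.
	for streams != nil || errs != nil {
		select {
		case stream, ok := <-streams:
			if !ok {
				streams = nil
				continue
			}
			_, copyErr := io.Copy(dst, stream)
			stream.Close() // unblocks delivery of the next chunk
			if copyErr != nil {
				return copyErr
			}
		case err, ok := <-errs:
			if !ok {
				errs = nil
				continue
			}
			return err
		}
	}
	return nil
}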
@@ -585,7 +633,7 @@ type bufferedNetworkReaderBuffer struct { } type bufferedNetworkReader struct { - stream io.Reader + stream io.ReadCloser emptyBuffer chan *bufferedNetworkReaderBuffer readyBuffer chan *bufferedNetworkReaderBuffer terminate chan bool @@ -611,9 +659,10 @@ func handleBufferedNetworkReader(br *bufferedNetworkReader) { } } -func (n *bufferedNetworkReader) Close() { +func (n *bufferedNetworkReader) Close() error { close(n.terminate) close(n.emptyBuffer) + return n.stream.Close() } func (n *bufferedNetworkReader) read(p []byte) (int, error) { @@ -657,7 +706,7 @@ func (n *bufferedNetworkReader) Read(p []byte) (int, error) { return n.read(p) } -func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *bufferedNetworkReader { +func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader { br := bufferedNetworkReader{ stream: stream, emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), @@ -680,15 +729,22 @@ func makeBufferedNetworkReader(stream io.Reader, nBuffers, bufferSize uint) *buf } type signalCloseReader struct { - Closed chan interface{} - Stream io.ReadCloser + closed chan interface{} + stream io.ReadCloser + consumeStream bool } func (s signalCloseReader) Read(p []byte) (int, error) { - return s.Stream.Read(p) + return s.stream.Read(p) } func (s signalCloseReader) Close() error { - defer close(s.Closed) - return s.Stream.Close() + defer close(s.closed) + if s.consumeStream { + if _, err := io.Copy(ioutil.Discard, s.stream); err != nil { + s.stream.Close() + return err + } + } + return s.stream.Close() } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index a558657b676..44b0af110ab 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" @@ -86,7 +87,7 @@ func (d *Destination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available @@ -95,7 +96,7 @@ func (d *Destination) HasThreadSafePutBlob() bool { func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. // When the layer is decompressed, we also have to generate the digest on uncompressed data. 
- if inputInfo.Size == -1 || inputInfo.Digest.String() == "" { + if inputInfo.Size == -1 || inputInfo.Digest == "" { logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob") if err != nil { @@ -104,10 +105,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t defer os.Remove(streamCopy.Name()) defer streamCopy.Close() - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) + digester, stream2 := putblobdigest.DigestIfUnknown(stream, inputInfo) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - size, err := io.Copy(streamCopy, tee) + size, err := io.Copy(streamCopy, stream2) if err != nil { return types.BlobInfo{}, err } @@ -116,9 +116,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t return types.BlobInfo{}, err } inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy. - if inputInfo.Digest == "" { - inputInfo.Digest = digester.Digest() - } + inputInfo.Digest = digester.Digest() stream = streamCopy logrus.Debugf("... streaming done") } diff --git a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go index 4f2465cac4c..65d60c37a18 100644 --- a/vendor/github.com/containers/image/v5/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/tarfile/dest.go @@ -72,7 +72,7 @@ func (d *Destination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go index 88e123cdd1d..bf6cc87d421 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package keyctl diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go index 91c64a1b8ba..5eaad615c7c 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux // +build linux // Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go index ae9697149d1..5f4d2157ae9 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package keyctl diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go index 196c8276077..f61666e42c2 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package keyctl diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go new file mode 100644 index 00000000000..b8d3a7e56d7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go @@ -0,0 +1,57 @@ +package putblobdigest + +import ( + "io" + + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// Digester computes a digest of the provided stream, if not known yet. +type Digester struct { + knownDigest digest.Digest // Or "" + digester digest.Digester // Or nil +} + +// newDigester initiates computation of a digest.Canonical digest of stream, +// if !validDigest; otherwise it just records knownDigest to be returned later. +// The caller MUST use the returned stream instead of the original value. +func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { + if validDigest { + return Digester{knownDigest: knownDigest}, stream + } else { + res := Digester{ + digester: digest.Canonical.Digester(), + } + stream = io.TeeReader(stream, res.digester.Hash()) + return res, stream + } +} + +// DigestIfUnknown initiates computation of a digest.Canonical digest of stream, +// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will +// be used (accepting any algorithm). +// The caller MUST use the returned stream instead of the original value. +func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { + d := blobInfo.Digest + return newDigester(stream, d, d != "") +} + +// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, +// if a digest.Canonical digest is not supplied in the provided blobInfo; +// otherwise blobInfo.Digest will be used. +// The caller MUST use the returned stream instead of the original value. +func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { + d := blobInfo.Digest + return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical) +} + +// Digest() returns a digest value possibly computed by Digester. +// This must be called only after all of the stream returned by a Digester constructor +// has been successfully read. 
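+// If the digest was already known from the provided BlobInfo, that value is returned
+// unchanged; otherwise the digest.Canonical value accumulated while reading the
+// stream is returned.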
+func (d Digester) Digest() digest.Digest {
+	if d.digester != nil {
+		return d.digester.Digest()
+	}
+	return d.knownDigest
+}
diff --git a/vendor/github.com/containers/image/v5/internal/types/types.go b/vendor/github.com/containers/image/v5/internal/types/types.go
index e0355a4772b..388f8cf3b49 100644
--- a/vendor/github.com/containers/image/v5/internal/types/types.go
+++ b/vendor/github.com/containers/image/v5/internal/types/types.go
@@ -70,6 +70,7 @@ type ImageSourceChunk struct {
 // This API is experimental and can be changed without bumping the major version number.
 type ImageSourceSeekable interface {
 	// GetBlobAt returns a stream for the specified blob.
+	// The specified chunks must not overlap, and must be sorted by their offset.
 	GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
 }
diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
index 065a0b055c8..3d8738db536 100644
--- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
@@ -88,7 +88,7 @@ func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
 }
 
 // PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
 // inputInfo.MediaType describes the blob format, if known.
 // May update cache.
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
index d1d06d64d8c..d0ee7263527 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
@@ -9,6 +9,7 @@ import (
 	"path/filepath"
 	"runtime"
 
+	"github.com/containers/image/v5/internal/putblobdigest"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
@@ -115,7 +116,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
 }
 
 // PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
 // inputInfo.MediaType describes the blob format, if known.
 // May update cache.
@@ -138,17 +139,15 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 		}
 	}()
 
-	digester := digest.Canonical.Digester()
-	tee := io.TeeReader(stream, digester.Hash())
-
+	digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- size, err := io.Copy(blobFile, tee) + size, err := io.Copy(blobFile, stream) if err != nil { return types.BlobInfo{}, err } - computedDigest := digester.Digest() + blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err @@ -164,7 +163,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp } } - blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) + blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir) if err != nil { return types.BlobInfo{}, err } @@ -179,7 +178,7 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp return types.BlobInfo{}, err } succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil + return types.BlobInfo{Digest: blobDigest, Size: size}, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index f9f811784c8..4ffbced6bd6 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -279,7 +279,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*rest } // ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable. -// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, +// ConfirmUsable looks a particular context and determines if that particular part of the config is usable. There might still be errors in the config, // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func (config *directClientConfig) ConfirmUsable() error { var validationErrors []error diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index 6ea65bcf3f4..c7c6cf6945a 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -395,7 +395,7 @@ func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index c91a49c57a4..3eb2a2cba22 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -1,3 +1,4 @@ +//go:build containers_image_ostree // +build containers_image_ostree package ostree @@ -20,6 +21,7 @@ import ( "time" "unsafe" + "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" @@ -138,7 +140,7 @@ func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. @@ -158,25 +160,23 @@ func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, } defer blobFile.Close() - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - size, err := io.Copy(blobFile, tee) + size, err := io.Copy(blobFile, stream) if err != nil { return types.BlobInfo{}, err } - computedDigest := digester.Digest() + blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) + return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } - hash := computedDigest.Hex() - d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} - return types.BlobInfo{Digest: computedDigest, Size: size}, nil + hash := blobDigest.Hex() + d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} + return types.BlobInfo{Digest: blobDigest, Size: size}, nil } func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go index 4948ec66416..d30c764a630 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -1,3 +1,4 @@ +//go:build containers_image_ostree // +build containers_image_ostree package ostree diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go index a55147b85e3..1e35ab6059f 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -1,3 +1,4 @@ +//go:build containers_image_ostree // +build containers_image_ostree package 
ostree diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index c82a9e1a0be..e37f4c19e53 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -478,7 +478,7 @@ func listAuthsFromCredHelper(credHelper string) (map[string]string, error) { return helperclient.List(p) } -// getPathToAuth gets the path of the auth.json file used for reading and writting credentials +// getPathToAuth gets the path of the auth.json file used for reading and writing credentials // returns the path, and a bool specifies whether the file is in legacy format func getPathToAuth(sys *types.SystemContext) (string, bool, error) { return getPathToAuthWithOS(sys, runtime.GOOS) @@ -601,10 +601,18 @@ func getAuthFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, if err != nil { return types.DockerAuthConfig{}, err } - return types.DockerAuthConfig{ - Username: creds.Username, - Password: creds.Secret, - }, nil + + switch creds.Username { + case "": + return types.DockerAuthConfig{ + IdentityToken: creds.Secret, + }, nil + default: + return types.DockerAuthConfig{ + Username: creds.Username, + Password: creds.Secret, + }, nil + } } func setAuthToCredHelper(credHelper, registry, username, password string) error { diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go index 1354ee46d96..0bf16125919 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go @@ -10,7 +10,7 @@ import ( ) // NOTE: none of the functions here are currently used. If we ever want to -// reenable keyring support, we should introduce a similar built-in credential +// re-enable keyring support, we should introduce a similar built-in credential // helpers as for `sysregistriesv2.AuthenticationFileHelper`. 
const keyDescribePrefix = "container-registry-login:" //nolint:deadcode,unused diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go index 65e5804101f..d9827d8edbc 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux && (!386 || !amd64) // +build !linux // +build !386 !amd64 diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go index a0afc34b42d..6ae74d430d6 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go @@ -1,3 +1,4 @@ +//go:build !containers_image_openpgp // +build !containers_image_openpgp package signature diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go index a0576028436..0a09788f989 100644 --- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go +++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go @@ -1,3 +1,4 @@ +//go:build containers_image_openpgp // +build containers_image_openpgp package signature diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 6b0fea61a29..7329ef6eee0 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -1,3 +1,4 @@ +//go:build !containers_image_storage_stub // +build !containers_image_storage_stub package storage @@ -17,13 +18,14 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" + "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/tmpdir" internalTypes "github.com/containers/image/v5/internal/types" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" "github.com/containers/storage" - "github.com/containers/storage/drivers" + graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked" "github.com/containers/storage/pkg/ioutils" @@ -34,8 +36,10 @@ import ( ) var ( - // ErrBlobDigestMismatch is returned when PutBlob() is given a blob + // ErrBlobDigestMismatch could potentially be returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. + // Deprecated: PutBlob() doesn't do this any more (it just accepts the caller’s value), + // and there is no known user of this error. ErrBlobDigestMismatch = stderrors.New("blob digest mismatch") // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. @@ -468,7 +472,7 @@ func (s *storageImageDestination) HasThreadSafePutBlob() bool { } // PutBlob writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. 
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. @@ -482,26 +486,28 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, Digest: "", Size: -1, } - // Set up to digest the blob and count its size while saving it to a file. - hasher := digest.Canonical.Digester() - if blobinfo.Digest.Validate() == nil { - if a := blobinfo.Digest.Algorithm(); a.Available() { - hasher = a.Digester() + if blobinfo.Digest != "" { + if err := blobinfo.Digest.Validate(); err != nil { + return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) } } - diffID := digest.Canonical.Digester() + + // Set up to digest the blob if necessary, and count its size while saving it to a file. filename := s.computeNextBlobCacheFile() file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) if err != nil { return errorBlobInfo, errors.Wrapf(err, "creating temporary file %q", filename) } defer file.Close() - counter := ioutils.NewWriteCounter(hasher.Hash()) - reader := io.TeeReader(io.TeeReader(stream, counter), file) - decompressed, err := archive.DecompressStream(reader) + counter := ioutils.NewWriteCounter(file) + stream = io.TeeReader(stream, counter) + digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) + decompressed, err := archive.DecompressStream(stream) if err != nil { return errorBlobInfo, errors.Wrap(err, "setting up to decompress blob") } + + diffID := digest.Canonical.Digester() // Copy the data to the file. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). _, err = io.Copy(diffID.Hash(), decompressed) @@ -509,28 +515,25 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, if err != nil { return errorBlobInfo, errors.Wrapf(err, "storing blob to file %q", filename) } - // Ensure that any information that we were given about the blob is correct. - if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { - return errorBlobInfo, errors.WithStack(ErrBlobDigestMismatch) - } - if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + + // Determine blob properties, and fail if information that we were given about the blob + // is known to be incorrect. + blobDigest := digester.Digest() + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count + } else if blobinfo.Size != counter.Count { return errorBlobInfo, errors.WithStack(ErrBlobSizeMismatch) } + // Record information about the blob. s.lock.Lock() - s.blobDiffIDs[hasher.Digest()] = diffID.Digest() - s.fileSizes[hasher.Digest()] = counter.Count - s.filenames[hasher.Digest()] = filename + s.blobDiffIDs[blobDigest] = diffID.Digest() + s.fileSizes[blobDigest] = counter.Count + s.filenames[blobDigest] = filename s.lock.Unlock() - blobDigest := blobinfo.Digest - if blobDigest.Validate() != nil { - blobDigest = hasher.Digest() - } - blobSize := blobinfo.Size - if blobSize < 0 { - blobSize = counter.Count - } - // This is safe because we have just computed both values ourselves. + // This is safe because we have just computed diffID, and blobDigest was either computed + // by us, or validated by the caller (usually copy.digestingReader). 
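+	// (digestingReader fails the whole copy if the stream does not match the digest it
+	// was given, so a caller-provided blobDigest has been verified by the time we get here.)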
cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) return types.BlobInfo{ Digest: blobDigest, @@ -813,7 +816,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types. // // The conceptual benefit of this design is that caller can continue // pulling layers after an early return. At any given time, only one - // caller is the "worker" routine comitting layers. All other routines + // caller is the "worker" routine committing layers. All other routines // can continue pulling and queuing in layers. s.lock.Lock() s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{ @@ -852,7 +855,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types. // must guarantee that, at any given time, at most one goroutine may execute // `commitLayer()`. func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error { - // Already commited? Return early. + // Already committed? Return early. if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { return nil } @@ -1004,7 +1007,10 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest defer file.Close() // Build the new layer using the diff, regardless of where it came from. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ + OriginalDigest: blob.Digest, + UncompressedDigest: diffID, + }, file) if err != nil && errors.Cause(err) != storage.ErrDuplicateID { return errors.Wrapf(err, "adding layer with blob %q", blob.Digest) } @@ -1065,7 +1071,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t if len(layerBlobs) > 0 { // Can happen when using caches prev := s.indexToStorageID[len(layerBlobs)-1] if prev == nil { - return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been commited (lastLayer == nil)", len(layerBlobs)-1) + return errors.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) } lastLayer = *prev } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 1aafe9068cf..7c6da112c74 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -1,3 +1,4 @@ +//go:build !containers_image_storage_stub // +build !containers_image_storage_stub package storage diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index d4c85b7256a..ab59c8a290e 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -1,3 +1,4 @@ +//go:build !containers_image_storage_stub // +build !containers_image_storage_stub package storage diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go index 82224052e66..ffac6e0b8a3 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go +++ 
b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon.go @@ -1,3 +1,4 @@ +//go:build !containers_image_docker_daemon_stub // +build !containers_image_docker_daemon_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go index d137007991f..ddc347bf35d 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/docker_daemon_stub.go @@ -1,3 +1,4 @@ +//go:build containers_image_docker_daemon_stub // +build containers_image_docker_daemon_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go index 72432d1ef80..2340702bdc5 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree.go @@ -1,3 +1,4 @@ +//go:build containers_image_ostree && linux // +build containers_image_ostree,linux package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go index f4a862bd4e4..8c4175188f0 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/ostree_stub.go @@ -1,3 +1,4 @@ +//go:build !containers_image_ostree || !linux // +build !containers_image_ostree !linux package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go index 7041eb876af..1e399cdb024 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/storage.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage.go @@ -1,3 +1,4 @@ +//go:build !containers_image_storage_stub // +build !containers_image_storage_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go index 67f0291cc08..30802661f17 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/storage_stub.go @@ -1,3 +1,4 @@ +//go:build containers_image_storage_stub // +build containers_image_storage_stub package alltransports diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 1c4a1419fec..354b3f6631b 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -299,7 +299,7 @@ type ImageDestination interface { IgnoresEmbeddedDockerReference() bool // PutBlob writes contents of stream and returns data representing the result. - // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. + // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. // inputInfo.Size is the expected length of stream, if known. 
// inputInfo.MediaType describes the blob format, if known. // May update cache. diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 478a03b0515..b9f8c3e9ffe 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 15 + VersionMinor = 16 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 1955f2878c8..9fe803a5e90 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,6 +1,12 @@ -## unreleased +## 1.4.2 -* Fix regression where `*time.Time` value would be set to empty and not be sent +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent to decode hooks properly [GH-232] ## 1.4.0 diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 92e6f76fff4..4d4bbc733ba 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -62,7 +62,8 @@ func DecodeHookExec( func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error - var data interface{} + data := f.Interface() + newFrom := f for _, f1 := range fs { data, err = DecodeHookExec(f1, newFrom, t) diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 3643901f55f..dcee0f2d634 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -192,7 +192,7 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) -// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target // values. type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) @@ -258,6 +258,11 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. 
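+	//
+	// For example, an exact-match (case-sensitive) configuration would set:
+	//   MatchName: func(mapKey, fieldName string) bool { return mapKey == fieldName }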
+ MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -376,6 +381,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -1340,7 +1349,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 35d8108958f..581cf7cdfad 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -53,4 +53,10 @@ const ( // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index fe799bd698c..ffff4b6d186 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -89,9 +89,20 @@ type Image struct { // Architecture is the CPU architecture which the binaries in this image are built to run on. Architecture string `json:"architecture"` + // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. + Variant string `json:"variant,omitempty"` + // OS is the name of the operating system which the image is built to run on. OS string `json:"os"` + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + // Config defines the execution parameters which should be used as a base when running a container using the image. 
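Note: alongside the base-image annotation keys, this image-spec revision adds the platform fields shown above to v1.Image. A short sketch of both additions; all values are illustrative placeholders (the OSVersion and OSFeatures examples come from the field doc comments, and the digest is deliberately zeroed):

package ociexample

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

// Platform fields added to v1.Image in this revision.
var armImg = v1.Image{
	Architecture: "arm",
	Variant:      "v7", // new: CPU architecture variant
	OS:           "linux",
}

var winImg = v1.Image{
	Architecture: "amd64",
	OS:           "windows",
	OSVersion:    "10.0.14393.1066",
	OSFeatures:   []string{"win32k"},
}

// The new base-image annotation keys; reference and digest are placeholders.
var annotations = map[string]string{
	v1.AnnotationBaseImageName:   "docker.io/library/alpine:3.14",
	v1.AnnotationBaseImageDigest: "sha256:0000000000000000000000000000000000000000000000000000000000000000",
}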
Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v7/README.md index d0560d799ef..90d4fe639ca 100644 --- a/vendor/github.com/vbauerster/mpb/v7/README.md +++ b/vendor/github.com/vbauerster/mpb/v7/README.md @@ -84,7 +84,7 @@ func main() { // replace ETA decorator with "done" message, OnComplete event decor.OnComplete( // ETA decorator with ewma age of 60 - decor.EwmaETA(decor.ET_STYLE_GO, 60), "done", + decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth), "done", ), ), ) diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go index ed6c73edaeb..ca191cf3910 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar.go @@ -20,21 +20,18 @@ type Bar struct { priority int // used by heap index int // used by heap - extendedLines int toShutdown bool toDrop bool noPop bool hasEwmaDecorators bool operateState chan func(*bState) - frameCh chan io.Reader - syncTableCh chan [][]chan int - completed chan bool + frameCh chan *frame // cancel is called either by user or on complete event cancel func() // done is closed after cacheState is assigned done chan struct{} - // cacheState is populated, right after close(shutdown) + // cacheState is populated, right after close(b.done) cacheState *bState container *Progress @@ -77,6 +74,11 @@ type bState struct { debugOut io.Writer } +type frame struct { + reader io.Reader + lines int +} + func newBar(container *Progress, bs *bState) *Bar { logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id) ctx, cancel := context.WithCancel(container.ctx) @@ -87,9 +89,7 @@ func newBar(container *Progress, bs *bState) *Bar { toDrop: bs.dropOnComplete, noPop: bs.noPop, operateState: make(chan func(*bState)), - frameCh: make(chan io.Reader, 1), - syncTableCh: make(chan [][]chan int, 1), - completed: make(chan bool, 1), + frameCh: make(chan *frame, 1), done: make(chan struct{}), cancel: cancel, dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile), @@ -145,6 +145,7 @@ func (b *Bar) SetRefill(amount int64) { // TraverseDecorators traverses all available decorators and calls cb func on each. func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { + done := make(chan struct{}) select { case b.operateState <- func(s *bState) { for _, decorators := range [...][]decor.Decorator{ @@ -155,7 +156,9 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { cb(extractBaseDecorator(d)) } } + close(done) }: + <-done case <-b.done: } } @@ -174,7 +177,7 @@ func (b *Bar) SetTotal(total int64, triggerComplete bool) { if s.triggerComplete && !s.completed { s.current = s.total s.completed = true - go b.refreshTillShutdown() + go b.forceRefreshIfLastUncompleted() } }: case <-b.done: @@ -192,7 +195,7 @@ func (b *Bar) SetCurrent(current int64) { if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.refreshTillShutdown() + go b.forceRefreshIfLastUncompleted() } }: case <-b.done: @@ -219,7 +222,7 @@ func (b *Bar) IncrInt64(n int64) { if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.refreshTillShutdown() + go b.forceRefreshIfLastUncompleted() } }: case <-b.done: @@ -258,32 +261,49 @@ func (b *Bar) DecoratorAverageAdjust(start time.Time) { // priority, i.e. bar will be on top. If you don't need to set priority // dynamically, better use BarPriority option. 
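Note: several bar.go changes above share one fix. An operation sent to the serving goroutine now carries a done channel, so the caller blocks until the operation has actually executed rather than returning as soon as it is queued (TraverseDecorators is the clearest case). The pattern in isolation, with hypothetical names:

package serveexample

type server struct {
	ops      chan func()
	shutdown chan struct{} // closed when the serving goroutine exits
}

// do submits fn to the serving goroutine and waits until it has run,
// so fn's side effects are visible to the caller when do returns.
func (s *server) do(fn func()) {
	ran := make(chan struct{})
	select {
	case s.ops <- func() { fn(); close(ran) }:
		<-ran
	case <-s.shutdown:
	}
}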
func (b *Bar) SetPriority(priority int) { - select { - case <-b.done: - default: - b.container.setBarPriority(b, priority) - } + b.container.UpdateBarPriority(b, priority) } -// Abort interrupts bar's running goroutine. Call this, if you'd like -// to stop/remove bar before completion event. It has no effect after -// completion event. If drop is true bar will be removed as well. +// Abort interrupts bar's running goroutine. Abort won't be engaged +// if bar is already in complete state. If drop is true bar will be +// removed as well. func (b *Bar) Abort(drop bool) { select { - case <-b.done: - default: + case b.operateState <- func(s *bState) { + if s.completed == true { + return + } if drop { b.container.dropBar(b) + b.cancel() + return } - b.cancel() + go func() { + var uncompleted int + b.container.traverseBars(func(bar *Bar) bool { + if b != bar && !bar.Completed() { + uncompleted++ + return false + } + return true + }) + if uncompleted == 0 { + b.container.refreshCh <- time.Now() + } + b.cancel() + }() + }: + <-b.done + case <-b.done: } } // Completed reports whether the bar is in completed state. func (b *Bar) Completed() bool { + result := make(chan bool) select { - case b.operateState <- func(s *bState) { b.completed <- s.completed }: - return <-b.completed + case b.operateState <- func(s *bState) { result <- s.completed }: + return <-result case <-b.done: return true } @@ -296,12 +316,12 @@ func (b *Bar) serve(ctx context.Context, s *bState) { case op := <-b.operateState: op(s) case <-ctx.Done(): - b.cacheState = s - close(b.done) // Notifying decorators about shutdown event for _, sl := range s.shutdownListeners { sl.Shutdown() } + b.cacheState = s + close(b.done) return } } @@ -319,17 +339,15 @@ func (b *Bar) render(tw int) { b.toShutdown = !b.toShutdown b.recoveredPanic = p } - frame, lines := s.extender(nil, s.reqWidth, stat) - b.extendedLines = lines - b.frameCh <- frame + reader, lines := s.extender(nil, s.reqWidth, stat) + b.frameCh <- &frame{reader, lines + 1} b.dlogger.Println(p) } s.completeFlushed = s.completed }() - frame, lines := s.extender(s.draw(stat), s.reqWidth, stat) - b.extendedLines = lines + reader, lines := s.extender(s.draw(stat), s.reqWidth, stat) b.toShutdown = s.completed && !s.completeFlushed - b.frameCh <- frame + b.frameCh <- &frame{reader, lines + 1} }: case <-b.done: s := b.cacheState @@ -338,9 +356,8 @@ func (b *Bar) render(tw int) { if b.recoveredPanic == nil { r = s.draw(stat) } - frame, lines := s.extender(r, s.reqWidth, stat) - b.extendedLines = lines - b.frameCh <- frame + reader, lines := s.extender(r, s.reqWidth, stat) + b.frameCh <- &frame{reader, lines + 1} } } @@ -359,31 +376,42 @@ func (b *Bar) subscribeDecorators() { shutdownListeners = append(shutdownListeners, d) } }) + b.hasEwmaDecorators = len(ewmaDecorators) != 0 select { case b.operateState <- func(s *bState) { s.averageDecorators = averageDecorators s.ewmaDecorators = ewmaDecorators s.shutdownListeners = shutdownListeners }: - b.hasEwmaDecorators = len(ewmaDecorators) != 0 case <-b.done: } } -func (b *Bar) refreshTillShutdown() { - for { - select { - case b.container.refreshCh <- time.Now(): - case <-b.done: - return +func (b *Bar) forceRefreshIfLastUncompleted() { + var uncompleted int + b.container.traverseBars(func(bar *Bar) bool { + if b != bar && !bar.Completed() { + uncompleted++ + return false + } + return true + }) + if uncompleted == 0 { + for { + select { + case b.container.refreshCh <- time.Now(): + case <-b.done: + return + } } } } func (b *Bar) wSyncTable() 
[][]chan int { + result := make(chan [][]chan int) select { - case b.operateState <- func(s *bState) { b.syncTableCh <- s.wSyncTable() }: - return <-b.syncTableCh + case b.operateState <- func(s *bState) { result <- s.wSyncTable() }: + return <-result case <-b.done: return b.cacheState.wSyncTable() } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go index e30d4921c63..80b2104555d 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go @@ -26,15 +26,17 @@ type BarStyleComposer interface { Filler(string) BarStyleComposer Refiller(string) BarStyleComposer Padding(string) BarStyleComposer - Tip(...string) BarStyleComposer + TipOnComplete(string) BarStyleComposer + Tip(frames ...string) BarStyleComposer Reverse() BarStyleComposer } type bFiller struct { components [components]*component tip struct { - count uint - frames []*component + count uint + onComplete *component + frames []*component } flush func(dst io.Writer, filling, padding [][]byte) } @@ -45,25 +47,26 @@ type component struct { } type barStyle struct { - lbound string - rbound string - filler string - refiller string - padding string - tip []string - rev bool + lbound string + rbound string + filler string + refiller string + padding string + tipOnComplete string + tipFrames []string + rev bool } // BarStyle constructs default bar style which can be altered via // BarStyleComposer interface. func BarStyle() BarStyleComposer { return &barStyle{ - lbound: "[", - rbound: "]", - filler: "=", - refiller: "+", - padding: "-", - tip: []string{">"}, + lbound: "[", + rbound: "]", + filler: "=", + refiller: "+", + padding: "-", + tipFrames: []string{">"}, } } @@ -92,9 +95,14 @@ func (s *barStyle) Padding(padding string) BarStyleComposer { return s } -func (s *barStyle) Tip(tip ...string) BarStyleComposer { - if len(tip) != 0 { - s.tip = append(s.tip[:0], tip...) +func (s *barStyle) TipOnComplete(tip string) BarStyleComposer { + s.tipOnComplete = tip + return s +} + +func (s *barStyle) Tip(frames ...string) BarStyleComposer { + if len(frames) != 0 { + s.tipFrames = append(s.tipFrames[:0], frames...) 
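Note: the composer above now separates the animated tip frames (Tip) from a fixed tip shown once the bar completes (TipOnComplete). A usage sketch against mpb v7.1.3, assuming the usual Progress.Add entry point and the composer's Build method:

package main

import "github.com/vbauerster/mpb/v7"

func main() {
	p := mpb.New()
	bar := p.Add(100,
		mpb.BarStyle().
			Tip("-", "\\", "|", "/"). // animated while in progress
			TipOnComplete(">").       // fixed once the bar completes
			Build(),
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}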
} return s } @@ -133,8 +141,12 @@ func (s *barStyle) Build() BarFiller { width: runewidth.StringWidth(stripansi.Strip(s.padding)), bytes: []byte(s.padding), } - bf.tip.frames = make([]*component, len(s.tip)) - for i, t := range s.tip { + bf.tip.onComplete = &component{ + width: runewidth.StringWidth(stripansi.Strip(s.tipOnComplete)), + bytes: []byte(s.tipOnComplete), + } + bf.tip.frames = make([]*component, len(s.tipFrames)) + for i, t := range s.tipFrames { bf.tip.frames[i] = &component{ width: runewidth.StringWidth(stripansi.Strip(t)), bytes: []byte(t), @@ -146,64 +158,82 @@ func (s *barStyle) Build() BarFiller { func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { width = internal.CheckRequestedWidth(width, stat.AvailableWidth) brackets := s.components[iLbound].width + s.components[iRbound].width - if width < brackets { - return - } // don't count brackets as progress width -= brackets + if width < 0 { + return + } w.Write(s.components[iLbound].bytes) defer w.Write(s.components[iRbound].bytes) - curWidth := int(internal.PercentageRound(stat.Total, stat.Current, width)) - refWidth, filled := 0, curWidth - filling := make([][]byte, 0, curWidth) - - if curWidth > 0 && curWidth != width { - tipFrame := s.tip.frames[s.tip.count%uint(len(s.tip.frames))] - filling = append(filling, tipFrame.bytes) - curWidth -= tipFrame.width - s.tip.count++ + if width == 0 { + return } - if stat.Refill > 0 && curWidth > 0 { - refWidth = int(internal.PercentageRound(stat.Total, int64(stat.Refill), width)) - if refWidth > curWidth { - refWidth = curWidth - } - curWidth -= refWidth + var filling [][]byte + var padding [][]byte + var tip *component + var filled int + var refWidth int + curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) + + if stat.Current >= stat.Total { + tip = s.tip.onComplete + } else { + tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))] } - for curWidth > 0 && curWidth >= s.components[iFiller].width { - filling = append(filling, s.components[iFiller].bytes) - curWidth -= s.components[iFiller].width - if s.components[iFiller].width == 0 { - break - } + if curWidth > 0 { + filling = append(filling, tip.bytes) + filled += tip.width + s.tip.count++ } - for refWidth > 0 && refWidth >= s.components[iRefiller].width { - filling = append(filling, s.components[iRefiller].bytes) - refWidth -= s.components[iRefiller].width - if s.components[iRefiller].width == 0 { - break + if stat.Refill > 0 { + refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) + curWidth -= refWidth + refWidth += curWidth + } + + for filled < curWidth { + if curWidth-filled >= s.components[iFiller].width { + filling = append(filling, s.components[iFiller].bytes) + if s.components[iFiller].width == 0 { + break + } + filled += s.components[iFiller].width + } else { + filling = append(filling, []byte("…")) + filled++ } } - filled -= curWidth + refWidth - padWidth := width - filled - padding := make([][]byte, 0, padWidth) - for padWidth > 0 && padWidth >= s.components[iPadding].width { - padding = append(padding, s.components[iPadding].bytes) - padWidth -= s.components[iPadding].width - if s.components[iPadding].width == 0 { - break + for filled < refWidth { + if refWidth-filled >= s.components[iRefiller].width { + filling = append(filling, s.components[iRefiller].bytes) + if s.components[iRefiller].width == 0 { + break + } + filled += s.components[iRefiller].width + } else { + filling = append(filling, []byte("…")) + filled++ } } + padWidth := width - 
filled for padWidth > 0 { - padding = append(padding, []byte("…")) - padWidth-- + if padWidth >= s.components[iPadding].width { + padding = append(padding, s.components[iPadding].bytes) + if s.components[iPadding].width == 0 { + break + } + padWidth -= s.components[iPadding].width + } else { + padding = append(padding, []byte("…")) + padWidth-- + } } s.flush(w, filling, padding) diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v7/container_option.go index e4254f66257..a858c3c51dd 100644 --- a/vendor/github.com/vbauerster/mpb/v7/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v7/container_option.go @@ -62,7 +62,11 @@ func WithRenderDelay(ch <-chan struct{}) ContainerOption { // have been rendered. func WithShutdownNotifier(ch chan struct{}) ContainerOption { return func(s *pState) { - s.shutdownNotifier = ch + select { + case <-ch: + default: + s.shutdownNotifier = ch + } } } diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go index 1ade54761dd..925c8b1dcf1 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go +++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go @@ -22,7 +22,7 @@ const ( type Writer struct { out io.Writer buf bytes.Buffer - lineCount int + lines int fd int isTerminal bool } @@ -38,15 +38,15 @@ func New(out io.Writer) *Writer { } // Flush flushes the underlying buffer. -func (w *Writer) Flush(lineCount int) (err error) { +func (w *Writer) Flush(lines int) (err error) { // some terminals interpret 'cursor up 0' as 'cursor up 1' - if w.lineCount > 0 { + if w.lines > 0 { err = w.clearLines() if err != nil { return } } - w.lineCount = lineCount + w.lines = lines _, err = w.buf.WriteTo(w.out) return } @@ -78,7 +78,7 @@ func (w *Writer) GetWidth() (int, error) { func (w *Writer) ansiCuuAndEd() (err error) { buf := make([]byte, 8) - buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lineCount), 10) + buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10) _, err = w.out.Write(append(buf, cuuAndEd...)) return } diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go index 1a69c81acf0..8f99dbe324e 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go +++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go @@ -26,7 +26,7 @@ func (w *Writer) clearLines() error { return err } - info.CursorPosition.Y -= int16(w.lineCount) + info.CursorPosition.Y -= int16(w.lines) if info.CursorPosition.Y < 0 { info.CursorPosition.Y = 0 } @@ -40,7 +40,7 @@ func (w *Writer) clearLines() error { X: info.Window.Left, Y: info.CursorPosition.Y, } - count := uint32(info.Size.X) * uint32(w.lineCount) + count := uint32(info.Size.X) * uint32(w.lines) _, _, _ = procFillConsoleOutputCharacter.Call( uintptr(w.fd), uintptr(' '), diff --git a/vendor/github.com/vbauerster/mpb/v7/go.mod b/vendor/github.com/vbauerster/mpb/v7/go.mod index 22a2c651cba..7b177d0dbf6 100644 --- a/vendor/github.com/vbauerster/mpb/v7/go.mod +++ b/vendor/github.com/vbauerster/mpb/v7/go.mod @@ -4,7 +4,7 @@ require ( github.com/VividCortex/ewma v1.2.0 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/mattn/go-runewidth v0.0.13 - golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 + golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e ) go 1.14 diff --git a/vendor/github.com/vbauerster/mpb/v7/go.sum 
b/vendor/github.com/vbauerster/mpb/v7/go.sum index 59051bd7bfe..45584e0bf34 100644 --- a/vendor/github.com/vbauerster/mpb/v7/go.sum +++ b/vendor/github.com/vbauerster/mpb/v7/go.sum @@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4 github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go index a8ef8be1250..4bc36f5ba8f 100644 --- a/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go @@ -3,7 +3,7 @@ package internal import "math" // Percentage is a helper function, to calculate percentage. -func Percentage(total, current int64, width int) float64 { +func Percentage(total, current int64, width uint) float64 { if total <= 0 { return 0 } @@ -14,6 +14,6 @@ func Percentage(total, current int64, width int) float64 { } // PercentageRound same as Percentage but with math.Round. -func PercentageRound(total, current int64, width int) float64 { +func PercentageRound(total, current int64, width uint) float64 { return math.Round(Percentage(total, current, width)) } diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go index b2017f3f064..c60c6569406 100644 --- a/vendor/github.com/vbauerster/mpb/v7/progress.go +++ b/vendor/github.com/vbauerster/mpb/v7/progress.go @@ -19,7 +19,7 @@ import ( const ( // default RefreshRate - prr = 120 * time.Millisecond + prr = 150 * time.Millisecond ) // Progress represents a container that renders one or more progress @@ -157,27 +157,40 @@ func (p *Progress) dropBar(b *Bar) { } } -func (p *Progress) setBarPriority(b *Bar, priority int) { +func (p *Progress) traverseBars(cb func(b *Bar) bool) { + done := make(chan struct{}) select { case p.operateState <- func(s *pState) { - if b.index < 0 { - return + for i := 0; i < s.bHeap.Len(); i++ { + bar := s.bHeap[i] + if !cb(bar) { + break + } } - b.priority = priority - heap.Fix(&s.bHeap, b.index) + close(done) }: + <-done case <-p.done: } } // UpdateBarPriority same as *Bar.SetPriority(int). func (p *Progress) UpdateBarPriority(b *Bar, priority int) { - p.setBarPriority(b, priority) + select { + case p.operateState <- func(s *pState) { + if b.index < 0 { + return + } + b.priority = priority + heap.Fix(&s.bHeap, b.index) + }: + case <-p.done: + } } // BarCount returns bars count. 
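Note: where the earlier sketch showed commands waiting for completion, BarCount below is the query flavor of the same serving-goroutine pattern: its result channel loses the buffer, making the handoff synchronous, which is safe because the caller is already committed to receiving. Distilled, with hypothetical names:

package queryexample

type server struct {
	ops      chan func()
	shutdown chan struct{}
}

// query asks the serving goroutine for a value; the unbuffered result
// channel makes the handoff synchronous in both directions.
func (s *server) query(get func() int) int {
	result := make(chan int) // deliberately unbuffered, as in BarCount
	select {
	case s.ops <- func() { result <- get() }:
		return <-result
	case <-s.shutdown:
		return 0
	}
}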
func (p *Progress) BarCount() int { - result := make(chan int, 1) + result := make(chan int) select { case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }: return <-result @@ -222,7 +235,7 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { p.dlogger.Println(err) } case <-s.shutdownNotifier: - if s.heapUpdated { + for s.heapUpdated { if err := s.render(cw); err != nil { p.dlogger.Println(err) } @@ -291,11 +304,12 @@ func (s *pState) render(cw *cwriter.Writer) error { } func (s *pState) flush(cw *cwriter.Writer) error { - var lineCount int - bm := make(map[*Bar]struct{}, s.bHeap.Len()) + var totalLines int + bm := make(map[*Bar]int, s.bHeap.Len()) for s.bHeap.Len() > 0 { b := heap.Pop(&s.bHeap).(*Bar) - cw.ReadFrom(<-b.frameCh) + frame := <-b.frameCh + cw.ReadFrom(frame.reader) if b.toShutdown { if b.recoveredPanic != nil { s.barShutdownQueue = append(s.barShutdownQueue, b) @@ -308,8 +322,8 @@ func (s *pState) flush(cw *cwriter.Writer) error { }() } } - lineCount += b.extendedLines + 1 - bm[b] = struct{}{} + bm[b] = frame.lines + totalLines += frame.lines } for _, b := range s.barShutdownQueue { @@ -320,7 +334,7 @@ func (s *pState) flush(cw *cwriter.Writer) error { b.toDrop = true } if s.popCompleted && !b.noPop { - lineCount -= b.extendedLines + 1 + totalLines -= bm[b] b.toDrop = true } if b.toDrop { @@ -335,7 +349,7 @@ func (s *pState) flush(cw *cwriter.Writer) error { heap.Push(&s.bHeap, b) } - return cw.Flush(lineCount) + return cw.Flush(totalLines) } func (s *pState) updateSyncMatrix() { diff --git a/vendor/modules.txt b/vendor/modules.txt index 1b351615f92..49f2160a4d8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -186,7 +186,7 @@ github.com/containers/buildah/pkg/overlay github.com/containers/buildah/pkg/parse github.com/containers/buildah/pkg/rusage github.com/containers/buildah/util -# github.com/containers/common v0.43.2 +# github.com/containers/common v0.45.0 ## explicit github.com/containers/common/libimage github.com/containers/common/libimage/manifests @@ -216,7 +216,7 @@ github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.15.2 +# github.com/containers/image/v5 v5.16.0 ## explicit github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -233,6 +233,7 @@ github.com/containers/image/v5/internal/blobinfocache github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/pkg/keyctl github.com/containers/image/v5/internal/pkg/platform +github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless github.com/containers/image/v5/internal/tmpdir github.com/containers/image/v5/internal/types @@ -705,7 +706,7 @@ github.com/miekg/pkcs11 github.com/mistifyio/go-zfs # github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir -# github.com/mitchellh/mapstructure v1.4.1 +# github.com/mitchellh/mapstructure v1.4.2 github.com/mitchellh/mapstructure # github.com/mmarkdown/mmark v2.0.40+incompatible github.com/mmarkdown/mmark/mast @@ -784,7 +785,7 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.2-0.20210708142037-083f635f2b04 +# github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 ## explicit github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 @@ 
-929,7 +930,7 @@ github.com/urfave/cli/v2 github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v7 v7.0.3 +# github.com/vbauerster/mpb/v7 v7.1.3 github.com/vbauerster/mpb/v7 github.com/vbauerster/mpb/v7/cwriter github.com/vbauerster/mpb/v7/decor
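Note: a vendor refresh like this one is generated rather than hand-edited. With the versions recorded in vendor/modules.txt above, the standard Go module workflow is roughly:

	go get github.com/containers/image/v5@v5.16.0
	go get github.com/containers/common@v0.45.0
	go mod tidy
	go mod vendor

go mod vendor then rewrites vendor/ and vendor/modules.txt, which is why the vendored mpb, mapstructure, and image-spec changes above land in the same commit as the go.mod bump.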